use candle::{DType, Device, Error as E, IndexOp, Module, Result, Tensor, D};
use candle_nn::{embedding, linear_b, rms_norm, Embedding, Linear, RmsNorm, VarBuilder};
// Equivalent to torch.repeat_interleave
pub(crate) fn repeat_interleave(img: &Tensor, repeats: usize, dim: usize) -> Result<Tensor> {
let img = img.unsqueeze(dim + 1)?;
let mut dims = img.dims().to_vec();
dims[dim + 1] = repeats;
img.broadcast_as(dims)?.flatten(dim, dim + 1)
}
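// Illustrative usage sketch, not part of the upstream file: with a (1, 3) input and
// `repeats = 2` along dim 1, every element is duplicated in place, matching
// `torch.repeat_interleave(x, 2, dim=1)`. The test module and its name are hypothetical.
#[cfg(test)]
mod repeat_interleave_example {
    use super::repeat_interleave;
    use candle::{Device, Tensor};

    #[test]
    fn duplicates_elements_along_the_given_dim() -> candle::Result<()> {
        let xs = Tensor::new(&[[0u32, 1, 2]], &Device::Cpu)?;
        let ys = repeat_interleave(&xs, 2, 1)?;
        assert_eq!(ys.dims2()?, (1, 6));
        assert_eq!(ys.to_vec2::<u32>()?, vec![vec![0, 0, 1, 1, 2, 2]]);
        Ok(())
    }
}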
pub mod speaker_encoder {
use super::*;
#[derive(Debug, Clone, serde::Deserialize)]
pub struct Config {
pub sampling_rate: usize,
pub partial_n_frames: usize,
pub model_hidden_size: usize,
pub model_embedding_size: usize,
pub model_num_layers: usize,
pub mel_window_length: usize,
pub mel_window_step: usize,
pub mel_n_channels: usize,
}
impl Config {
pub fn cfg() -> Self {
Self {
sampling_rate: 16_000,
partial_n_frames: 160,
model_hidden_size: 256,
model_embedding_size: 256,
model_num_layers: 3,
mel_window_length: 25,
mel_window_step: 10,
mel_n_channels: 40,
}
}
}
pub struct Model {
lstms: Vec<candle_nn::LSTM>,
linear: Linear,
cfg: Config,
}
type Slice = (usize, usize);
impl Model {
pub fn new(cfg: Config, vb: VarBuilder) -> Result<Self> {
let mut lstms = Vec::with_capacity(cfg.model_num_layers);
let vb_l = vb.pp("lstm");
for layer_idx in 0..cfg.model_num_layers {
let c = candle_nn::LSTMConfig {
layer_idx,
..Default::default()
};
let lstm = candle_nn::lstm(
cfg.mel_n_channels,
cfg.model_hidden_size,
c,
vb_l.pp(layer_idx),
)?;
lstms.push(lstm)
}
let linear = linear_b(
cfg.model_hidden_size,
cfg.model_embedding_size,
true,
vb.pp("linear"),
)?;
Ok(Self { lstms, linear, cfg })
}
fn compute_partial_slices(
&self,
n_samples: usize,
rate: f64,
min_coverage: f64,
) -> (Vec<Slice>, Vec<Slice>) {
let c = &self.cfg;
// Compute how many frames separate two partial utterances
let samples_per_frame = c.sampling_rate * c.mel_window_step / 1000;
let n_frames = n_samples / samples_per_frame + 1;
let frame_step =
(c.sampling_rate as f64 / rate / samples_per_frame as f64).round() as usize;
let steps = (n_frames + frame_step).saturating_sub(c.partial_n_frames) + 1;
// Compute the slices.
let mut wav_slices = vec![];
let mut mel_slices = vec![];
for i in (0..steps).step_by(frame_step) {
let mel_range = (i, i + c.partial_n_frames);
let wav_range = (
i * samples_per_frame,
(i + c.partial_n_frames) * samples_per_frame,
);
mel_slices.push(mel_range);
wav_slices.push(wav_range);
}
// Evaluate whether extra padding is warranted or not.
let last_wav_range = match wav_slices.last() {
None => return (wav_slices, mel_slices),
Some(l) => *l,
};
let coverage = (n_samples - last_wav_range.0) as f64
/ (last_wav_range.1 - last_wav_range.0) as f64;
            if coverage < min_coverage && mel_slices.len() > 1 {
mel_slices.pop();
wav_slices.pop();
}
(wav_slices, mel_slices)
}
pub fn embed_utterance(
&self,
wav: &[f32],
mel_filters: &[f32],
rate: f64,
min_c: f64,
device: &Device,
) -> Result<Tensor> {
let (wav_slices, mel_slices) = self.compute_partial_slices(wav.len(), rate, min_c);
let max_wave_length = match wav_slices.last() {
Some(v) => v.1,
None => candle::bail!("empty wav slices"),
};
let wav = if max_wave_length > wav.len() {
let mut wav = wav.to_vec();
            // Zero-pad the waveform so that the last partial slice is fully covered.
            wav.resize(max_wave_length, 0.0);
std::borrow::Cow::Owned(wav)
} else {
std::borrow::Cow::Borrowed(wav)
};
let mel = crate::models::whisper::audio::log_mel_spectrogram_(
wav.as_ref(),
mel_filters,
/* fft_size */ self.cfg.mel_window_length,
/* fft_step */ self.cfg.mel_window_step,
self.cfg.mel_n_channels,
false,
);
let mels = mel_slices
.iter()
.flat_map(|s| [mel[s.0], mel[s.1]])
.collect::<Vec<_>>();
let mels = Tensor::from_vec(mels, (mel_slices.len(), 2), device)?;
let partial_embeds = self.forward(&mels)?;
let raw_embed = partial_embeds.mean(0)?;
let norm = raw_embed.sqr()?.sum_all()?.sqrt()?;
raw_embed.broadcast_div(&norm)
}
}
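    // Assumption-laden sketch, not part of the upstream file: reproduces the frame
    // arithmetic of `compute_partial_slices` for the default config without building a
    // `Model`. At 16 kHz with 10 ms mel steps there are 160 samples per frame, and at a
    // hypothetical `rate` of 1.3 partials per second successive 160-frame windows advance
    // by 77 frames, i.e. they overlap by roughly half their length.
    #[cfg(test)]
    mod partial_slice_arithmetic_example {
        use super::Config;

        #[test]
        fn default_config_frame_step() {
            let c = Config::cfg();
            let rate = 1.3f64;
            let samples_per_frame = c.sampling_rate * c.mel_window_step / 1000;
            let frame_step =
                (c.sampling_rate as f64 / rate / samples_per_frame as f64).round() as usize;
            assert_eq!(samples_per_frame, 160);
            assert_eq!(frame_step, 77);
        }
    }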
impl Module for Model {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
use candle_nn::RNN;
// This is different from the Python transformers version as candle LSTM is batch first.
let xs = xs.t()?;
let mut xs = xs.clone();
for layer in self.lstms.iter() {
let states = layer.seq(&xs)?;
xs = layer.states_to_tensor(&states)?;
}
let xs = xs.t()?;
let embeds_raw = xs.apply(&self.linear)?.relu()?;
let norm = embeds_raw.sqr()?.sum_keepdim(1)?.sqrt()?;
embeds_raw.broadcast_div(&norm)
}
}
}
type Rank = u32;
pub mod tokenizers {
use super::*;
use std::collections::HashMap;
pub struct BPE {
pub re: fancy_regex::Regex,
pub end_of_text: usize,
pub offset: usize,
pub ranks: HashMap<Vec<u8>, Rank>,
span: tracing::Span,
}
impl BPE {
pub fn from_json(json: &serde_json::Value, end_of_text: usize) -> Result<Self> {
let json = match json.as_object() {
None => candle::bail!("json value is not an object"),
Some(json) => json,
};
let re = match json.get("pat_str") {
None => candle::bail!("json object has no pat_str field"),
Some(pat_str) => match pat_str.as_str() {
None => candle::bail!("pat_str field is not a string"),
Some(pat_str) => fancy_regex::Regex::new(pat_str).map_err(E::wrap)?,
},
};
let offset = match json.get("offset") {
None => candle::bail!("json object has no offset field"),
Some(offset) => match offset.as_u64() {
None => candle::bail!("offset field is not a positive int"),
Some(offset) => offset as usize,
},
};
let mut ranks = HashMap::new();
for id in 0u8..=255 {
ranks.insert(vec![id], id as u32);
}
let mergeable_ranks = match json.get("mergeable_ranks") {
None => candle::bail!("json object has no mergeable_ranks field"),
Some(mr) => match mr.as_object() {
None => candle::bail!("mergeable_ranks is not an object"),
Some(mr) => mr,
},
};
for (key, value) in mergeable_ranks.iter() {
let value = match value.as_u64() {
None => candle::bail!("mergeable_ranks '{key}' is not a u64"),
Some(value) => value as u32,
};
if value < 256 {
continue;
}
// No escaping for other keys.
let key = key.as_bytes().to_vec();
ranks.insert(key, value);
}
Ok(Self {
re,
end_of_text,
offset,
ranks,
span: tracing::span!(tracing::Level::TRACE, "bpe"),
})
}
// Taken from:
// https://github.com/openai/tiktoken/blob/1b9faf2779855124f05174adf1383e53689ed94b/src/lib.rs#L16C1-L82C2
fn _byte_pair_merge(&self, piece: &[u8]) -> Vec<(usize, Rank)> {
// This is a vector of (start, rank).
// The rank is of the pair starting at position start.
let mut parts = Vec::with_capacity(piece.len() + 1);
// Note that we hash bytes when indexing into `ranks`, not token pairs. As long as we train BPE
// the way we currently do, this is equivalent. An easy way to break this would be to decouple
// merge priority from token index or to prevent specific token merges.
let mut min_rank: (Rank, usize) = (Rank::MAX, usize::MAX);
for i in 0..piece.len() - 1 {
let rank = *self.ranks.get(&piece[i..i + 2]).unwrap_or(&Rank::MAX);
if rank < min_rank.0 {
min_rank = (rank, i);
}
parts.push((i, rank));
}
parts.push((piece.len() - 1, Rank::MAX));
parts.push((piece.len(), Rank::MAX));
let get_rank = {
#[inline(always)]
|parts: &Vec<(usize, Rank)>, i: usize| {
if (i + 3) < parts.len() {
// Similar to `piece[i..i + 2]` above. The +3 is because we haven't yet deleted
// parts[i + 1], see comment in the main loop.
*self
.ranks
.get(&piece[parts[i].0..parts[i + 3].0])
.unwrap_or(&Rank::MAX)
} else {
Rank::MAX
}
}
};
// If you have n parts and m merges, this does O(mn) work.
// We could do something with a heap and do O(m log n) work.
// n is often very small so considerations like cache-locality outweigh the algorithmic
// complexity downsides of the `parts` vector.
while min_rank.0 != Rank::MAX {
let i = min_rank.1;
// Update parts[i] and parts[i - 1] before removing parts[i + 1], since
// `parts.remove(i + 1)` will thrash the cache.
if i > 0 {
parts[i - 1].1 = get_rank(&parts, i - 1);
}
parts[i].1 = get_rank(&parts, i);
parts.remove(i + 1);
min_rank = (Rank::MAX, usize::MAX);
for (i, &(_, rank)) in parts[..parts.len() - 1].iter().enumerate() {
if rank < min_rank.0 {
min_rank = (rank, i);
}
}
}
parts
}
pub fn byte_pair_encode(&self, piece: &[u8]) -> Vec<Rank> {
if piece.is_empty() {
return Vec::new();
}
if piece.len() == 1 {
return vec![self.ranks[piece]];
}
assert!(piece.len() > 1);
self._byte_pair_merge(piece)
.windows(2)
.map(|part| self.ranks[&piece[part[0].0..part[1].0]])
.collect()
}
pub fn encode(&self, text: &str) -> Result<Vec<u32>> {
let _enter = self.span.enter();
let mut bpe_tokens: Vec<u32> = Vec::new();
for word in self.re.find_iter(text) {
let word = word.map_err(E::wrap)?;
let word_tokens = self.byte_pair_encode(word.as_str().as_bytes());
for &token in word_tokens.iter() {
bpe_tokens.push(token + self.offset as u32)
}
}
bpe_tokens.push((self.end_of_text + self.offset) as u32);
Ok(bpe_tokens)
}
}
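    // Assumption-laden sketch, not part of the upstream file: a hand-built rank table is
    // enough to see how `byte_pair_encode` merges bytes. Every single byte keeps its own
    // rank and the made-up pair b"ab" gets rank 256, so encoding b"aba" first merges "ab"
    // and then emits the remaining "a" as a single byte: [256, 97]. The regex and all
    // constants here are illustrative only.
    #[cfg(test)]
    mod bpe_merge_example {
        use super::BPE;
        use candle::Error as E;
        use std::collections::HashMap;

        #[test]
        fn merges_the_highest_priority_pair_first() -> candle::Result<()> {
            let mut ranks = HashMap::new();
            for id in 0u8..=255 {
                ranks.insert(vec![id], id as u32);
            }
            ranks.insert(b"ab".to_vec(), 256);
            let bpe = BPE {
                re: fancy_regex::Regex::new(r"\S+").map_err(E::wrap)?,
                end_of_text: 0,
                offset: 0,
                ranks,
                span: tracing::span!(tracing::Level::TRACE, "bpe-example"),
            };
            assert_eq!(bpe.byte_pair_encode(b"aba"), vec![256, b'a' as u32]);
            Ok(())
        }
    }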
}
pub mod gpt {
use super::*;
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
pub enum NormType {
LayerNorm,
RMSNorm,
}
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
pub enum AttnKernelType {
Fa2,
TorchAttn,
Hand,
}
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
pub enum NonLinearityType {
Gelu,
Swiglu,
}
enum Norm {
RMSNorm(candle_nn::RmsNorm),
LayerNorm(candle_nn::LayerNorm),
}
// https://github.com/metavoiceio/metavoice-src/blob/11550bb4e8a1ad032cc1556cc924f7a4e767cbfa/fam/llm/model.py#L27
#[derive(Debug, Clone)]
pub struct Config {
pub block_size: usize,
pub vocab_sizes: Vec<usize>,
pub target_vocab_sizes: Vec<usize>,
pub n_layer: usize,
pub n_head: usize,
pub n_embd: usize,
pub bias: bool,
pub causal: bool,
pub spk_emb_on_text: bool,
pub norm_type: NormType,
pub rmsnorm_eps: f64,
pub nonlinearity_type: NonLinearityType,
pub swiglu_multiple_of: Option<usize>,
pub attn_kernel_type: AttnKernelType,
pub kv_cache_enabled: bool,
}
impl Config {
pub fn cfg1b_v0_1() -> Self {
Self {
n_layer: 6,
n_head: 6,
n_embd: 384,
block_size: 1024,
bias: false,
vocab_sizes: vec![1538, 1025],
causal: false,
target_vocab_sizes: vec![1025, 1025, 1025, 1025, 1025, 1025],
swiglu_multiple_of: Some(256),
norm_type: NormType::LayerNorm,
kv_cache_enabled: false,
attn_kernel_type: AttnKernelType::TorchAttn,
spk_emb_on_text: true,
nonlinearity_type: NonLinearityType::Gelu,
rmsnorm_eps: 1e-5,
}
}
}
impl Norm {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
match cfg.norm_type {
NormType::RMSNorm => {
let rms_norm = candle_nn::rms_norm(cfg.n_embd, cfg.rmsnorm_eps, vb)?;
Ok(Self::RMSNorm(rms_norm))
}
NormType::LayerNorm => {
let ln_cfg = candle_nn::LayerNormConfig {
affine: cfg.bias,
..Default::default()
};
let layer_norm = candle_nn::layer_norm(cfg.n_embd, ln_cfg, vb)?;
Ok(Self::LayerNorm(layer_norm))
}
}
}
}
impl Module for Norm {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
match self {
Self::RMSNorm(m) => m.forward(xs),
Self::LayerNorm(m) => m.forward(xs),
}
}
}
// https://github.com/metavoiceio/metavoice-src/blob/11550bb4e8a1ad032cc1556cc924f7a4e767cbfa/fam/llm/layers/attn.py#L18
struct SelfAttention {
c_attn: Linear,
c_proj: Linear,
n_head: usize,
span: tracing::Span,
}
impl SelfAttention {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        // The different attention variants are likely to be identical, but we still only
        // accept TorchAttn for now.
if cfg.attn_kernel_type != AttnKernelType::TorchAttn {
candle::bail!("only TorchAttn is supported")
}
if cfg.kv_cache_enabled {
candle::bail!("kv_cache_enabled=true is not supported")
}
let c_attn = linear_b(cfg.n_embd, cfg.n_embd * 3, cfg.bias, vb.pp("c_attn"))?;
let c_proj = linear_b(cfg.n_embd, cfg.n_embd, cfg.bias, vb.pp("c_proj"))?;
Ok(Self {
c_attn,
c_proj,
n_head: cfg.n_head,
span: tracing::span!(tracing::Level::TRACE, "self-attn"),
})
}
}
impl Module for SelfAttention {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let (b, t, c) = xs.dims3()?;
let c_x = xs
.apply(&self.c_attn)?
.reshape((b, t, 3, self.n_head, c / self.n_head))?;
let q = c_x.i((.., .., 0))?;
let k = c_x.i((.., .., 1))?;
let v = c_x.i((.., .., 2))?;
let q = q.transpose(1, 2)?.contiguous()?;
let k = k.transpose(1, 2)?.contiguous()?;
let v = v.transpose(1, 2)?.contiguous()?;
let att = (q.matmul(&k.t()?)? / (k.dim(D::Minus1)? as f64).sqrt())?;
// TODO: causal mask
let att = candle_nn::ops::softmax_last_dim(&att)?;
let att = att.matmul(&v)?.transpose(1, 2)?;
att.reshape((b, t, c))?.apply(&self.c_proj)
}
}
// https://github.com/metavoiceio/metavoice-src/blob/11550bb4e8a1ad032cc1556cc924f7a4e767cbfa/fam/llm/layers/layers.py#L43
#[allow(clippy::upper_case_acronyms)]
enum MLP {
Gelu {
c_fc: Linear,
c_proj: Linear,
span: tracing::Span,
},
Swiglu {
w1: Linear,
w3: Linear,
c_proj: Linear,
span: tracing::Span,
},
}
impl MLP {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_dim = 4 * cfg.n_embd;
let slf = match cfg.nonlinearity_type {
NonLinearityType::Gelu => {
let c_fc = linear_b(cfg.n_embd, hidden_dim, cfg.bias, vb.pp("c_fc"))?;
let c_proj = linear_b(hidden_dim, cfg.n_embd, cfg.bias, vb.pp("c_proj"))?;
Self::Gelu {
c_fc,
c_proj,
span: tracing::span!(tracing::Level::TRACE, "mlp-gelu"),
}
}
NonLinearityType::Swiglu => {
let hidden_dim = (2 * hidden_dim) / 3;
let swiglu_multiple_of = match cfg.swiglu_multiple_of {
None => candle::bail!("swiglu-multiple-of has to be set"),
Some(smo) => smo,
};
                // Round the hidden dimension up to the next multiple of `swiglu_multiple_of`.
                let hidden_dim = (hidden_dim + swiglu_multiple_of - 1) / swiglu_multiple_of
                    * swiglu_multiple_of;
let w1 = linear_b(cfg.n_embd, hidden_dim, cfg.bias, vb.pp("w1"))?;
let w3 = linear_b(cfg.n_embd, hidden_dim, cfg.bias, vb.pp("w3"))?;
let c_proj = linear_b(hidden_dim, cfg.n_embd, cfg.bias, vb.pp("c_proj"))?;
Self::Swiglu {
w1,
w3,
c_proj,
span: tracing::span!(tracing::Level::TRACE, "mlp-swiglu"),
}
}
};
Ok(slf)
}
}
impl Module for MLP {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
match self {
Self::Gelu { c_fc, c_proj, span } => {
let _enter = span.enter();
xs.apply(c_fc)?.gelu()?.apply(c_proj)
}
Self::Swiglu {
w1,
w3,
c_proj,
span,
} => {
let _enter = span.enter();
let w1 = xs.apply(w1)?;
let w3 = xs.apply(w3)?;
(w1.silu()? * w3)?.apply(c_proj)
}
}
}
}
// https://github.com/metavoiceio/metavoice-src/blob/11550bb4e8a1ad032cc1556cc924f7a4e767cbfa/fam/llm/layers/combined.py#L7
struct Block {
ln_1: Norm,
ln_2: Norm,
attn: SelfAttention,
mlp: MLP,
span: tracing::Span,
}
impl Block {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let ln_1 = Norm::new(cfg, vb.pp("ln_1"))?;
let ln_2 = Norm::new(cfg, vb.pp("ln_2"))?;
let attn = SelfAttention::new(cfg, vb.pp("attn"))?;
let mlp = MLP::new(cfg, vb.pp("mlp"))?;
Ok(Block {
ln_1,
ln_2,
attn,
mlp,
span: tracing::span!(tracing::Level::TRACE, "gpt-block"),
})
}
}
impl Module for Block {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let xs = (xs + xs.apply(&self.ln_1)?.apply(&self.attn))?;
let xs = (&xs + xs.apply(&self.ln_2)?.apply(&self.mlp))?;
Ok(xs)
}
}
// https://github.com/metavoiceio/metavoice-src/blob/11550bb4e8a1ad032cc1556cc924f7a4e767cbfa/fam/llm/model.py#L79
#[allow(clippy::upper_case_acronyms)]
pub struct Model {
wtes: Vec<candle_nn::Embedding>,
wpe: candle_nn::Embedding,
h: Vec<Block>,
ln_f: Norm,
lm_heads: Vec<Linear>,
cfg: Config,
dtype: DType,
span: tracing::Span,
}
impl Model {
pub fn new(cfg: Config, vb: VarBuilder) -> Result<Self> {
let vb_t = vb.pp("transformer");
let ln_f = Norm::new(&cfg, vb_t.pp("ln_f"))?;
let mut wtes = Vec::with_capacity(cfg.vocab_sizes.len());
let vb_w = vb_t.pp("wtes");
for (idx, vocab_size) in cfg.vocab_sizes.iter().enumerate() {
let wte = candle_nn::embedding(*vocab_size, cfg.n_embd, vb_w.pp(idx))?;
wtes.push(wte)
}
let wpe = candle_nn::embedding(cfg.block_size, cfg.n_embd, vb_t.pp("wpe"))?;
let mut h = Vec::with_capacity(cfg.n_layer);
let vb_h = vb_t.pp("h");
for idx in 0..cfg.n_layer {
let block = Block::new(&cfg, vb_h.pp(idx))?;
h.push(block)
}
let mut lm_heads = Vec::with_capacity(cfg.target_vocab_sizes.len());
let vb_l = vb.pp("lm_heads");
for (idx, vocab_size) in cfg.target_vocab_sizes.iter().enumerate() {
let head = linear_b(cfg.n_embd, *vocab_size, false, vb_l.pp(idx))?;
lm_heads.push(head)
}
Ok(Self {
wtes,
wpe,
h,
ln_f,
lm_heads,
cfg,
dtype: vb.dtype(),
span: tracing::span!(tracing::Level::TRACE, "gpt"),
})
}
pub fn config(&self) -> &Config {
&self.cfg
}
pub fn forward(&self, idx: &Tensor) -> Result<Vec<Tensor>> {
let _enter = self.span.enter();
let device = idx.device();
let (b, _num_hierarchies, t) = idx.dims3()?;
let pos = Tensor::arange(0u32, t as u32, device)?;
let pos_emb = pos.apply(&self.wpe)?;
let mut tok_emb = Tensor::zeros((b, t, self.cfg.n_embd), self.dtype, device)?;
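            // One embedding table per input vocabulary: the per-hierarchy token embeddings
            // are summed into a single (b, t, n_embd) tensor before the positional
            // embeddings and the (still TODO) speaker embedding are added.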
for (wte_idx, wte) in self.wtes.iter().enumerate() {
let emb = idx.i((.., wte_idx, ..))?.apply(wte)?;
tok_emb = (tok_emb + emb)?;
}
// TODO: speaker embs.
let spk_emb = 0f64;
let mut xs = (pos_emb.broadcast_add(&tok_emb)? + spk_emb)?;
for block in self.h.iter() {
xs = xs.apply(block)?
}
let xs = xs.apply(&self.ln_f)?;
let mut logits = Vec::with_capacity(self.lm_heads.len());
for lm_head in self.lm_heads.iter() {
// non-causal mode only.
let ys = xs.apply(lm_head)?;
logits.push(ys)
}
Ok(logits)
}
}
}
pub mod transformer {
use super::*;
#[derive(Debug, Clone, serde::Deserialize)]
pub struct Config {
pub block_size: usize,
pub vocab_size: usize,
pub n_layer: usize,
pub n_head: usize,
pub dim: usize,
pub speaker_emb_dim: usize,
pub intermediate_size: Option<usize>,
pub n_local_heads: Option<usize>,
pub norm_eps: f64,
}
impl Config {
pub fn cfg1b_v0_1() -> Self {
Self {
n_layer: 24,
n_head: 16,
dim: 2048,
vocab_size: 2562,
speaker_emb_dim: 256,
block_size: 2048,
intermediate_size: None,
n_local_heads: None,
norm_eps: 1e-5,
}
}
pub(crate) fn n_local_heads(&self) -> usize {
self.n_local_heads.unwrap_or(self.n_head)
}
pub(crate) fn head_dim(&self) -> usize {
self.dim / self.n_head
}
pub(crate) fn intermediate_size(&self) -> usize {
match self.intermediate_size {
Some(intermediate_size) => intermediate_size,
None => {
let hidden_dim = self.dim * 4;
let n_hidden = ((2 * hidden_dim) as f64 / 3.) as usize;
(n_hidden + 255) / 256 * 256
}
}
}
}
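    // Illustrative check, not part of the upstream file: for the 1B v0.1 config above
    // (dim = 2048, 16 heads, no explicit intermediate size), each head is 128-dimensional
    // and the SwiGLU sizing rule (2/3 of 4 * dim, rounded up to a multiple of 256) gives
    // an intermediate size of 5632.
    #[cfg(test)]
    mod transformer_config_example {
        use super::Config;

        #[test]
        fn cfg1b_v0_1_derived_sizes() {
            let cfg = Config::cfg1b_v0_1();
            assert_eq!(cfg.head_dim(), 128);
            assert_eq!(cfg.n_local_heads(), 16);
            assert_eq!(cfg.intermediate_size(), 5632);
        }
    }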
#[derive(Debug, Clone)]
struct FeedForward {
w1: Linear,
w2: Linear,
w3: Linear,
span: tracing::Span,
}
impl FeedForward {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let i_size = cfg.intermediate_size();
let w1 = linear_b(cfg.dim, i_size, false, vb.pp("swiglu.w1"))?;
let w2 = linear_b(i_size, cfg.dim, false, vb.pp("w2"))?;
let w3 = linear_b(cfg.dim, i_size, false, vb.pp("swiglu.w3"))?;
Ok(Self {
w1,
w2,
w3,
span: tracing::span!(tracing::Level::TRACE, "feed-forward"),
})
}
}
impl Module for FeedForward {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let swiglu = (candle_nn::ops::silu(&xs.apply(&self.w1)?)? * xs.apply(&self.w3))?;
swiglu.apply(&self.w2)
}
}
#[derive(Debug, Clone)]
struct Attention {
wqkv: Linear,
wo: Linear,
dim: usize,
kv_size: usize,
n_local_heads: usize,
head_dim: usize,
n_head: usize,
kv_cache: Option<(Tensor, Tensor)>,
span: tracing::Span,
}
impl Attention {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let n_local_heads = cfg.n_local_heads();
let head_dim = cfg.head_dim();
let total_head_dim = (cfg.n_head + 2 * n_local_heads) * head_dim;
let wqkv = linear_b(cfg.dim, total_head_dim, false, vb.pp("wqkv"))?;
let wo = linear_b(cfg.dim, cfg.dim, false, vb.pp("wo"))?;
Ok(Self {
wqkv,
wo,
dim: cfg.dim,
kv_size: n_local_heads * head_dim,
n_local_heads,
head_dim,
n_head: cfg.n_head,
kv_cache: None,
                span: tracing::span!(tracing::Level::TRACE, "attention"),
})
}
fn forward(&mut self, xs: &Tensor, _pos: usize, mask: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let (b_sz, seqlen, _) = xs.dims3()?;
let qkv = xs.apply(&self.wqkv)?;
let q = qkv.narrow(D::Minus1, 0, self.dim)?;
let k = qkv.narrow(D::Minus1, self.dim, self.kv_size)?;
let v = qkv.narrow(D::Minus1, self.dim + self.kv_size, self.kv_size)?;
let q = q
.reshape((b_sz, seqlen, self.n_head, self.head_dim))?
.transpose(1, 2)?
.contiguous()?;
let k = k
.reshape((b_sz, seqlen, self.n_local_heads, self.head_dim))?
.transpose(1, 2)?;
let v = v
.reshape((b_sz, seqlen, self.n_local_heads, self.head_dim))?
.transpose(1, 2)?;
let (k, v) = match &self.kv_cache {
None => (k, v),
Some((prev_k, prev_v)) => {
let k = Tensor::cat(&[prev_k, &k], 2)?;
let v = Tensor::cat(&[prev_v, &v], 2)?;
(k, v)
}
};
self.kv_cache = Some((k.clone(), v.clone()));
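            // Grouped-query attention: each of the n_local_heads key/value heads is shared
            // by n_head / n_local_heads query heads, so K and V are expanded along the head
            // dimension before the attention product.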
let k = repeat_interleave(&k, self.n_head / self.n_local_heads, 1)?;
let v = repeat_interleave(&v, self.n_head / self.n_local_heads, 1)?;
let scale = 1f64 / f64::sqrt(self.head_dim as f64);
let attn_weights = (q.matmul(&k.transpose(2, 3)?)? * scale)?;
let attn_weights = attn_weights.broadcast_add(mask)?;
let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
let attn_output = attn_weights.matmul(&v)?;
attn_output
.transpose(1, 2)?
.reshape((b_sz, seqlen, self.dim))?
.apply(&self.wo)
}
fn clear_kv_cache(&mut self) {
self.kv_cache = None
}
}
#[derive(Debug, Clone)]
struct Block {
attention: Attention,
feed_forward: FeedForward,
ffn_norm: RmsNorm,
attention_norm: RmsNorm,
span: tracing::Span,
}
impl Block {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let attention = Attention::new(cfg, vb.pp("attention"))?;
let feed_forward = FeedForward::new(cfg, vb.pp("feed_forward"))?;
let ffn_norm = rms_norm(cfg.dim, cfg.norm_eps, vb.pp("ffn_norm"))?;
let attention_norm = rms_norm(cfg.dim, cfg.norm_eps, vb.pp("attention_norm"))?;
Ok(Self {
attention,
feed_forward,
ffn_norm,
attention_norm,
span: tracing::span!(tracing::Level::TRACE, "block"),
})
}
fn forward(&mut self, xs: &Tensor, pos: usize, mask: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let hs = xs.apply(&self.attention_norm)?;
let hs = (xs + self.attention.forward(&hs, pos, mask))?;
&hs + hs.apply(&self.ffn_norm)?.apply(&self.feed_forward)
}
fn clear_kv_cache(&mut self) {
self.attention.clear_kv_cache()
}
}
#[derive(Debug, Clone)]
pub struct Model {
tok_embeddings: Embedding,
pos_embeddings: Embedding,
speaker_cond_pos: Linear,
layers: Vec<Block>,
norm: RmsNorm,
output: Linear,
spk_cond_mask: Tensor,
span: tracing::Span,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let tok_embeddings = embedding(cfg.vocab_size, cfg.dim, vb.pp("tok_embeddings"))?;
let pos_embeddings = embedding(cfg.block_size, cfg.dim, vb.pp("pos_embeddings"))?;
let speaker_cond_pos = linear_b(
cfg.speaker_emb_dim,
cfg.dim,
false,
vb.pp("speaker_cond_pos"),
)?;
let mut layers = Vec::with_capacity(cfg.n_layer);
let vb_l = vb.pp("layers");
for layer_idx in 0..cfg.n_layer {
let layer = Block::new(cfg, vb_l.pp(layer_idx))?;
layers.push(layer)
}
let norm = rms_norm(cfg.dim, cfg.norm_eps, vb.pp("norm"))?;
let output = linear_b(cfg.dim, cfg.vocab_size, false, vb.pp("output"))?;
let dtype = vb.dtype();
let spk_cond_mask = Tensor::cat(
&[
Tensor::ones((1, 1, cfg.dim), dtype, vb.device())?,
Tensor::zeros((1, 1, cfg.dim), dtype, vb.device())?,
],
0,
)?;
Ok(Self {
tok_embeddings,
pos_embeddings,
speaker_cond_pos,
layers,
norm,
output,
spk_cond_mask,
span: tracing::span!(tracing::Level::TRACE, "transformer"),
})
}
pub fn clear_kv_cache(&mut self) {
for layer in self.layers.iter_mut() {
layer.clear_kv_cache()
}
}
pub fn forward(&mut self, xs: &Tensor, spk_emb: &Tensor, pos: usize) -> Result<Tensor> {
let _enter = self.span.enter();
let (_b_sz, seqlen) = xs.dims2()?;
let mask: Vec<_> = (0..seqlen)
.flat_map(|i| (0..seqlen).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. }))
.collect();
let mask = Tensor::from_slice(&mask, (1, 1, seqlen, seqlen), xs.device())?;
let input_pos = Tensor::arange(pos as u32, (pos + seqlen) as u32, xs.device())?;
let tok_embeddings = xs.apply(&self.tok_embeddings)?;
let pos_embeddings = input_pos.apply(&self.pos_embeddings)?;
let mut xs = tok_embeddings
.broadcast_add(&pos_embeddings)?
.broadcast_add(
&spk_emb
.apply(&self.speaker_cond_pos)?
.broadcast_mul(&self.spk_cond_mask)?,
)?;
let mask = mask.to_dtype(xs.dtype())?;
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, pos, &mask)?
}
xs.narrow(1, seqlen - 1, 1)?
.apply(&self.norm)?
.apply(&self.output)
}
}
}
pub mod adapters {
// https://github.com/metavoiceio/metavoice-src/blob/9078234c496d76adbec06df789b6b04b1875f129/fam/llm/adapters/tilted_encodec.py
pub struct TiltedEncodec {
end_of_audio_token: u32,
span: tracing::Span,
}
impl TiltedEncodec {
pub fn new(end_of_audio_token: u32) -> Self {
Self {
end_of_audio_token,
span: tracing::span!(tracing::Level::TRACE, "tilted-encodec"),
}
}
pub fn decode(&self, tokens: &[Vec<u32>]) -> (Vec<u32>, Vec<Vec<u32>>) {
let _enter = self.span.enter();
let mut text_ids = vec![];
let mut extracted_audio_ids = vec![];
let mut min_audio_ids_len = usize::MAX;
for (book_id, tokens) in tokens.iter().enumerate() {
let mut audio_ids = vec![];
for &t in tokens.iter() {
#[allow(clippy::comparison_chain)]
if t > self.end_of_audio_token {
if book_id == 0 {
text_ids.push(t)
}
} else if t < self.end_of_audio_token {
audio_ids.push(t)
}
}
min_audio_ids_len = usize::min(min_audio_ids_len, audio_ids.len());
extracted_audio_ids.push(audio_ids)
}
for audio_ids in extracted_audio_ids.iter_mut() {
audio_ids.truncate(min_audio_ids_len)
}
(text_ids, extracted_audio_ids)
}
}
// https://github.com/metavoiceio/metavoice-src/blob/9078234c496d76adbec06df789b6b04b1875f129/fam/llm/adapters/flattened_encodec.py#L4
pub struct FlattenedInterleavedEncodec2Codebook {
end_of_audio_token: u32,
span: tracing::Span,
}
impl FlattenedInterleavedEncodec2Codebook {
pub fn new(end_of_audio_token: u32) -> Self {
Self {
end_of_audio_token,
span: tracing::span!(tracing::Level::TRACE, "encodec2codebook"),
}
}
pub fn decode(&self, tokens: &[u32]) -> (Vec<u32>, Vec<u32>, Vec<u32>) {
let _enter = self.span.enter();
let mut text_ids = vec![];
let mut audio_ids1 = vec![];
let mut audio_ids2 = vec![];
for &t in tokens.iter() {
#[allow(clippy::comparison_chain)]
if t < self.end_of_audio_token {
audio_ids1.push(t)
} else if t < 2 * self.end_of_audio_token {
audio_ids2.push(t - self.end_of_audio_token)
} else {
text_ids.push(t)
}
}
(text_ids, audio_ids1, audio_ids2)
}
}
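    // Illustrative sketch, not part of the upstream file: with a hypothetical
    // end_of_audio_token of 1024, ids below 1024 go to the first codebook, ids in
    // 1024..2048 are shifted down into the second codebook, and ids of at least 2048 are
    // text tokens.
    #[cfg(test)]
    mod flattened_interleaved_example {
        use super::FlattenedInterleavedEncodec2Codebook;

        #[test]
        fn splits_the_token_stream() {
            let adapter = FlattenedInterleavedEncodec2Codebook::new(1024);
            let (text_ids, audio_ids1, audio_ids2) = adapter.decode(&[5, 1030, 2050, 7]);
            assert_eq!(text_ids, vec![2050]);
            assert_eq!(audio_ids1, vec![5, 7]);
            assert_eq!(audio_ids2, vec![6]);
        }
    }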
}
// ===== end of file: candle/candle-transformers/src/models/metavoice.rs =====
use crate::quantized_nn::{layer_norm_no_bias, linear_no_bias, Embedding, Linear};
pub use crate::quantized_var_builder::VarBuilder;
/// MPT model used by replit-code-v1_5-3b
/// https://huggingface.co/replit/replit-code-v1_5-3b/blob/main/modeling_mpt.py
use candle::{IndexOp, Module, Result, Tensor, D};
use candle_nn::LayerNorm;
pub use super::mpt::Config;
#[derive(Debug, Clone)]
struct GroupedQueryAttention {
wqkv: Linear,
out_proj: Linear,
kv_cache: Option<(Tensor, Tensor)>,
softmax_scale: f64,
head_dim: usize,
d_model: usize,
n_heads: usize,
kv_n_heads: usize,
attn_bias: Tensor,
span: tracing::Span,
}
impl GroupedQueryAttention {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let head_dim = cfg.d_model / cfg.n_heads;
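        // Fused QKV projection: the first d_model columns hold the queries, followed by
        // kv_n_heads * head_dim columns for the keys and the same amount for the values.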
let wqkv_size = cfg.d_model + 2 * cfg.kv_n_heads * head_dim;
let wqkv = linear_no_bias(cfg.d_model, wqkv_size, vb.pp("Wqkv"))?;
let softmax_scale = 1f64 / (head_dim as f64).sqrt();
let out_proj = linear_no_bias(cfg.d_model, cfg.d_model, vb.pp("out_proj"))?;
let attn_bias = super::mpt::build_alibi_bias(cfg)?.to_device(vb.device())?;
Ok(Self {
wqkv,
out_proj,
kv_cache: None,
softmax_scale,
head_dim,
d_model: cfg.d_model,
n_heads: cfg.n_heads,
kv_n_heads: cfg.kv_n_heads,
attn_bias,
span: tracing::span!(tracing::Level::TRACE, "gqa"),
})
}
fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> {
let _enter = self.span.enter();
let (b_size, seq_len, _n_embd) = xs.dims3()?;
let qkv = self.wqkv.forward(xs)?;
let query = qkv.narrow(2, 0, self.d_model)?;
let kv_size = self.kv_n_heads * self.head_dim;
let key = qkv.narrow(2, self.d_model, kv_size)?;
let value = qkv.narrow(2, self.d_model + kv_size, kv_size)?;
// scaled_multihead_dot_product_attention
let query = query
.reshape((b_size, seq_len, self.n_heads, ()))?
.transpose(1, 2)?; // b,h,s,d
let key = key
.reshape((b_size, seq_len, self.kv_n_heads, ()))?
.permute((0, 2, 3, 1))?; // b,h,d,s
let value = value
.reshape((b_size, seq_len, self.kv_n_heads, ()))?
.transpose(1, 2)?; // b,h,s,d
let (key, value) = match &self.kv_cache {
None => (key, value),
Some((prev_k, prev_v)) => {
let k = Tensor::cat(&[prev_k, &key], 3)?;
let v = Tensor::cat(&[prev_v, &value], 2)?;
(k, v)
}
};
self.kv_cache = Some((key.clone(), value.clone()));
let query = query.contiguous()?;
let key = super::mpt::repeat_kv(key, self.n_heads / self.kv_n_heads)?.contiguous()?;
let value = super::mpt::repeat_kv(value, self.n_heads / self.kv_n_heads)?.contiguous()?;
let attn_weights = (query.matmul(&key)? * self.softmax_scale)?;
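        // Slice the precomputed ALiBi bias so that its trailing rows/columns line up with
        // the current query and key lengths (the kv-cache can make s_k larger than s_q).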
let attn_bias = {
let s_q = query.dim(D::Minus2)?;
let s_k = key.dim(D::Minus1)?;
let (_, _, a_q, a_k) = self.attn_bias.dims4()?;
let start_q = a_q.saturating_sub(s_q);
let start_k = a_k.saturating_sub(s_k);
self.attn_bias.i((.., .., start_q.., start_k..))?
};
let attn_weights = attn_weights.broadcast_add(&attn_bias)?;
let attn_weights = match mask {
None => attn_weights,
Some(mask) => super::mpt::masked_fill(
&attn_weights,
&mask.broadcast_as(attn_weights.shape())?,
f32::NEG_INFINITY,
)?,
};
let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
let attn_output = attn_weights
.matmul(&value)?
.transpose(1, 2)?
.flatten_from(D::Minus2)?;
let out = attn_output.apply(&self.out_proj)?;
Ok(out)
}
}
#[derive(Debug, Clone)]
struct Ffn {
up_proj: Linear,
down_proj: Linear,
}
impl Ffn {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden = cfg.d_model * cfg.expansion_ratio;
let up_proj = linear_no_bias(cfg.d_model, hidden, vb.pp("up_proj"))?;
let down_proj = linear_no_bias(hidden, cfg.d_model, vb.pp("down_proj"))?;
Ok(Self { up_proj, down_proj })
}
}
impl Module for Ffn {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.up_proj)?.gelu_erf()?.apply(&self.down_proj)
}
}
#[derive(Debug, Clone)]
struct MPTBlock {
norm1: LayerNorm, // Do we need the low-precision variant?
attn: GroupedQueryAttention,
norm2: LayerNorm,
ffn: Ffn,
}
impl MPTBlock {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let norm1 = layer_norm_no_bias(cfg.d_model, 1e-5, vb.pp("norm_1"))?;
let norm2 = layer_norm_no_bias(cfg.d_model, 1e-5, vb.pp("norm_2"))?;
let attn = GroupedQueryAttention::new(cfg, vb.pp("attn"))?;
let ffn = Ffn::new(cfg, vb.pp("ffn"))?;
Ok(Self {
norm1,
attn,
norm2,
ffn,
})
}
fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> {
let residual = xs;
let xs = xs.apply(&self.norm1)?;
let xs = self.attn.forward(&xs, mask)?;
let xs = (xs + residual)?;
let residual = &xs;
let xs = xs.apply(&self.norm2)?.apply(&self.ffn)?;
xs + residual
}
}
#[derive(Debug, Clone)]
pub struct Model {
wte: Embedding,
blocks: Vec<MPTBlock>,
norm_f: LayerNorm,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let wte = Embedding::new(cfg.vocab_size, cfg.d_model, vb.pp("wte"))?;
let vb_b = vb.pp("blocks");
let mut blocks = Vec::with_capacity(cfg.n_layers);
for i in 0..cfg.n_layers {
let block = MPTBlock::new(cfg, vb_b.pp(i))?;
blocks.push(block)
}
let norm_f = layer_norm_no_bias(cfg.d_model, 1e-5, vb.pp("norm_f"))?;
Ok(Self {
wte,
blocks,
norm_f,
})
}
pub fn forward(&mut self, xs: &Tensor) -> Result<Tensor> {
let (_b_size, seq_len) = xs.dims2()?;
let mut xs = xs.apply(&self.wte)?;
let mask = if seq_len <= 1 {
None
} else {
Some(super::mpt::get_mask(seq_len, xs.device())?)
};
for block in self.blocks.iter_mut() {
xs = block.forward(&xs, mask.as_ref())?;
}
let xs = xs.apply(&self.norm_f)?;
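        // The language-model head is tied to the token embeddings: the logits for the last
        // position are obtained by multiplying its hidden state with the transposed
        // embedding matrix.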
let logits = xs
.narrow(1, seq_len - 1, 1)?
.squeeze(1)?
.matmul(&self.wte.embeddings().t()?)?
.squeeze(1)?;
Ok(logits)
}
}
// ===== end of file: candle/candle-transformers/src/models/quantized_mpt.rs =====
// Adapted from:
// https://github.com/ChaoningZhang/MobileSAM/blob/master/mobile_sam/modeling/tiny_vit_sam.py
use candle::{IndexOp, Result, Tensor, D};
use candle_nn::{Conv2dConfig, Module, VarBuilder};
const MBCONV_EXPAND_RATIO: usize = 4;
const MLP_RATIO: usize = 4;
const LOCAL_CONV_SIZE: usize = 3;
const IMG_SIZE: usize = 1024;
const IN_CHANNELS: usize = 3;
#[derive(Debug)]
struct Conv2dBN {
c: candle_nn::Conv2d,
bn: candle_nn::BatchNorm,
span: tracing::Span,
}
impl Conv2dBN {
fn new(in_: usize, out: usize, ks: usize, cfg: Conv2dConfig, vb: VarBuilder) -> Result<Self> {
let c = candle_nn::conv2d_no_bias(in_, out, ks, cfg, vb.pp("c"))?;
let bn = candle_nn::batch_norm(out, 1e-5, vb.pp("bn"))?;
let span = tracing::span!(tracing::Level::TRACE, "conv2d-bn");
Ok(Self { c, bn, span })
}
}
impl Module for Conv2dBN {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
xs.apply(&self.c)?.apply_t(&self.bn, false)
}
}
#[derive(Debug)]
struct PatchEmbed {
conv1: Conv2dBN,
conv2: Conv2dBN,
span: tracing::Span,
}
impl PatchEmbed {
fn new(in_chans: usize, embed_dim: usize, vb: VarBuilder) -> Result<Self> {
let cfg = candle_nn::Conv2dConfig {
stride: 2,
padding: 1,
..Default::default()
};
let conv1 = Conv2dBN::new(in_chans, embed_dim / 2, 3, cfg, vb.pp("seq.0"))?;
let conv2 = Conv2dBN::new(embed_dim / 2, embed_dim, 3, cfg, vb.pp("seq.2"))?;
let span = tracing::span!(tracing::Level::TRACE, "patch-embed");
Ok(Self { conv1, conv2, span })
}
}
impl Module for PatchEmbed {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
xs.apply(&self.conv1)?.gelu()?.apply(&self.conv2)
}
}
#[derive(Debug)]
struct MBConv {
conv1: Conv2dBN,
conv2: Conv2dBN,
conv3: Conv2dBN,
span: tracing::Span,
}
impl MBConv {
fn new(in_: usize, out: usize, expand_ratio: usize, vb: VarBuilder) -> Result<Self> {
let hidden = in_ * expand_ratio;
let cfg2 = candle_nn::Conv2dConfig {
padding: 1,
groups: hidden,
..Default::default()
};
let conv1 = Conv2dBN::new(in_, hidden, 1, Default::default(), vb.pp("conv1"))?;
let conv2 = Conv2dBN::new(hidden, hidden, 3, cfg2, vb.pp("conv2"))?;
let conv3 = Conv2dBN::new(hidden, out, 1, Default::default(), vb.pp("conv3"))?;
let span = tracing::span!(tracing::Level::TRACE, "mb-conv");
Ok(Self {
conv1,
conv2,
conv3,
span,
})
}
}
impl Module for MBConv {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let shortcut = xs;
let xs = xs
.apply(&self.conv1)?
.gelu()?
.apply(&self.conv2)?
.gelu()?
.apply(&self.conv3)?;
(xs + shortcut)?.gelu()
}
}
#[derive(Debug)]
struct PatchMerging {
conv1: Conv2dBN,
conv2: Conv2dBN,
conv3: Conv2dBN,
input_resolution: (usize, usize),
span: tracing::Span,
}
impl PatchMerging {
fn new(
input_resolution: (usize, usize),
dim: usize,
out: usize,
vb: VarBuilder,
) -> Result<Self> {
let stride = if [320, 448, 576].contains(&out) { 1 } else { 2 };
let cfg2 = candle_nn::Conv2dConfig {
padding: 1,
stride,
groups: out,
..Default::default()
};
let conv1 = Conv2dBN::new(dim, out, 1, Default::default(), vb.pp("conv1"))?;
let conv2 = Conv2dBN::new(out, out, 3, cfg2, vb.pp("conv2"))?;
let conv3 = Conv2dBN::new(out, out, 1, Default::default(), vb.pp("conv3"))?;
let span = tracing::span!(tracing::Level::TRACE, "patch-merging");
Ok(Self {
conv1,
conv2,
conv3,
input_resolution,
span,
})
}
}
impl Module for PatchMerging {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let xs = if xs.rank() == 3 {
let (h, w) = self.input_resolution;
let b = xs.dim(0)?;
xs.reshape((b, h, w, ()))?.permute((0, 3, 1, 2))?
} else {
xs.clone()
};
xs.apply(&self.conv1)?
.gelu()?
.apply(&self.conv2)?
.gelu()?
.apply(&self.conv3)?
.flatten_from(2)?
.transpose(1, 2)
}
}
#[derive(Debug)]
struct ConvLayer {
blocks: Vec<MBConv>,
downsample: Option<PatchMerging>,
span: tracing::Span,
}
impl ConvLayer {
fn new(
dim: usize,
out: usize,
input_resolution: (usize, usize),
depth: usize,
downsample: bool,
conv_expand_ratio: usize,
vb: VarBuilder,
) -> Result<Self> {
let vb_b = vb.pp("blocks");
let mut blocks = Vec::with_capacity(depth);
for index in 0..depth {
let block = MBConv::new(dim, dim, conv_expand_ratio, vb_b.pp(index))?;
blocks.push(block)
}
let downsample = if downsample {
let downsample = PatchMerging::new(input_resolution, dim, out, vb.pp("downsample"))?;
Some(downsample)
} else {
None
};
let span = tracing::span!(tracing::Level::TRACE, "conv-layer");
Ok(Self {
blocks,
downsample,
span,
})
}
}
impl Module for ConvLayer {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let mut xs = xs.clone();
for block in self.blocks.iter() {
xs = block.forward(&xs)?
}
match &self.downsample {
None => Ok(xs),
Some(downsample) => downsample.forward(&xs),
}
}
}
#[derive(Debug)]
struct Mlp {
norm: candle_nn::LayerNorm,
fc1: super::Linear,
fc2: super::Linear,
span: tracing::Span,
}
impl Mlp {
fn new(in_: usize, hidden: usize, vb: VarBuilder) -> Result<Self> {
let norm = candle_nn::layer_norm(in_, 1e-5, vb.pp("norm"))?;
let fc1 = super::linear(vb.pp("fc1"), in_, hidden, true)?;
let fc2 = super::linear(vb.pp("fc2"), hidden, in_, true)?;
let span = tracing::span!(tracing::Level::TRACE, "mlp");
Ok(Self {
norm,
fc1,
fc2,
span,
})
}
}
impl Module for Mlp {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
xs.apply(&self.norm)?
.apply(&self.fc1)?
.gelu()?
.apply(&self.fc2)
}
}
#[derive(Debug)]
struct Attention {
norm: candle_nn::LayerNorm,
qkv: super::Linear,
proj: super::Linear,
ab: Tensor,
key_dim: usize,
num_heads: usize,
d: usize,
dh: usize,
scale: f64,
span: tracing::Span,
span_matmul: tracing::Span,
span_softmax: tracing::Span,
}
impl Attention {
fn new(
dim: usize,
key_dim: usize,
num_heads: usize,
attn_ratio: usize,
resolution: (usize, usize),
vb: VarBuilder,
) -> Result<Self> {
let d = attn_ratio * key_dim;
let dh = d * num_heads;
let nh_kd = key_dim * num_heads;
let h = dh + nh_kd * 2;
let norm = candle_nn::layer_norm(dim, 1e-5, vb.pp("norm"))?;
let qkv = super::linear(vb.pp("qkv"), dim, h, true)?;
let proj = super::linear(vb.pp("proj"), dh, dim, true)?;
let points = (0..resolution.0)
.flat_map(|x| (0..resolution.1).map(move |y| (x as i64, y as i64)))
.collect::<Vec<_>>();
let mut idxs = Vec::with_capacity(points.len() * points.len());
let mut attention_offsets = std::collections::HashMap::new();
for &(x1, y1) in points.iter() {
for &(x2, y2) in points.iter() {
let offset = ((x2 - x1).abs(), (y2 - y1).abs());
let l = attention_offsets.len();
let idx = attention_offsets.entry(offset).or_insert(l);
idxs.push(*idx as u32)
}
}
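        // Every ordered pair of positions in the window maps to its (|dx|, |dy|) offset, so
        // a window of resolution (r, r) yields r^2 * r^2 indices into a much smaller table
        // of distinct offsets; the learned per-head biases are gathered below with these
        // indices.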
let attention_biases = vb.get((num_heads, attention_offsets.len()), "attention_biases")?;
let idxs = Tensor::new(idxs, attention_biases.device())?;
let ab =
attention_biases
.index_select(&idxs, 1)?
.reshape(((), points.len(), points.len()))?;
let span = tracing::span!(tracing::Level::TRACE, "attention");
let span_matmul = tracing::span!(tracing::Level::TRACE, "attn-matmul");
let span_softmax = tracing::span!(tracing::Level::TRACE, "attn-sm");
Ok(Self {
norm,
qkv,
proj,
ab,
key_dim,
num_heads,
d,
dh,
scale: 1f64 / (key_dim as f64).sqrt(),
span,
span_matmul,
span_softmax,
})
}
}
impl Module for Attention {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let (b, n, _) = xs.dims3()?;
let xs = xs.apply(&self.norm)?;
let qkv = xs.apply(&self.qkv)?.reshape((b, n, self.num_heads, ()))?;
let q = qkv
.narrow(D::Minus1, 0, self.key_dim)?
.permute((0, 2, 1, 3))?
.contiguous()?;
let k = qkv
.narrow(D::Minus1, self.key_dim, self.key_dim)?
.permute((0, 2, 1, 3))?
.contiguous()?;
let v = qkv
.narrow(D::Minus1, 2 * self.key_dim, self.d)?
.permute((0, 2, 1, 3))?
.contiguous()?;
let attn = {
let _enter = self.span_matmul.enter();
(q.matmul(&k.t()?)? * self.scale)?
};
let attn = attn.broadcast_add(&self.ab)?;
let attn = {
let _enter = self.span_softmax.enter();
candle_nn::ops::softmax_last_dim(&attn)?
};
let attn = {
let _enter = self.span_matmul.enter();
attn.matmul(&v)?
};
attn.transpose(1, 2)?
.reshape((b, n, self.dh))?
.apply(&self.proj)
}
}
#[derive(Debug)]
struct TinyViTBlock {
attn: Attention,
local_conv: Conv2dBN,
mlp: Mlp,
window_size: usize,
input_resolution: (usize, usize),
span: tracing::Span,
}
impl TinyViTBlock {
fn new(
dim: usize,
input_resolution: (usize, usize),
num_heads: usize,
window_size: usize,
vb: VarBuilder,
) -> Result<Self> {
let head_dim = dim / num_heads;
let attn = Attention::new(
dim,
head_dim,
num_heads,
1,
(window_size, window_size),
vb.pp("attn"),
)?;
let mlp = Mlp::new(dim, dim * MLP_RATIO, vb.pp("mlp"))?;
let cfg = candle_nn::Conv2dConfig {
padding: LOCAL_CONV_SIZE / 2,
groups: dim,
..Default::default()
};
let local_conv = Conv2dBN::new(dim, dim, LOCAL_CONV_SIZE, cfg, vb.pp("local_conv"))?;
let span = tracing::span!(tracing::Level::TRACE, "attention");
Ok(Self {
attn,
local_conv,
mlp,
window_size,
input_resolution,
span,
})
}
}
impl Module for TinyViTBlock {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let (h, w) = self.input_resolution;
let (b, l, c) = xs.dims3()?;
let res_x = xs;
let xs = if h == self.window_size && w == self.window_size {
self.attn.forward(xs)?
} else {
let xs = xs.reshape((b, h, w, c))?;
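            // Pad the spatial dimensions up to the next multiple of the window size so that
            // the feature map splits evenly into window_size x window_size tiles.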
let pad_b = (self.window_size - h % self.window_size) % self.window_size;
let pad_r = (self.window_size - w % self.window_size) % self.window_size;
let xs = if pad_b > 0 {
xs.pad_with_zeros(1, 0, pad_b)?
} else {
xs
};
let xs = if pad_r > 0 {
xs.pad_with_zeros(2, 0, pad_r)?
} else {
xs
};
let (p_h, p_w) = (h + pad_b, w + pad_r);
let n_h = p_h / self.window_size;
let n_w = p_w / self.window_size;
let xs = xs
.reshape((b, n_h, self.window_size, n_w, self.window_size, c))?
.transpose(2, 3)?
.reshape((b * n_h * n_w, self.window_size * self.window_size, c))?;
let xs = self.attn.forward(&xs)?;
let xs = xs
.reshape((b, n_h, n_w, self.window_size, self.window_size, c))?
.transpose(2, 3)?
.reshape((b, p_h, p_w, c))?;
let xs = if pad_r > 0 {
xs.i((.., .., ..w))?.contiguous()?
} else {
xs
};
let xs = if pad_b > 0 {
xs.i((.., ..h, ..))?.contiguous()?
} else {
xs
};
xs.reshape((b, l, c))?
};
let xs = (xs + res_x)?;
let xs = xs
.transpose(1, 2)?
.reshape((b, c, h, w))?
.apply(&self.local_conv)?
.reshape((b, c, l))?
.transpose(1, 2)?;
&xs + self.mlp.forward(&xs)?
}
}
#[derive(Debug)]
struct BasicLayer {
blocks: Vec<TinyViTBlock>,
downsample: Option<PatchMerging>,
span: tracing::Span,
}
impl BasicLayer {
#[allow(clippy::too_many_arguments)]
fn new(
dim: usize,
input_resolution: (usize, usize),
depth: usize,
num_heads: usize,
window_size: usize,
downsample: bool,
out: usize,
vb: VarBuilder,
) -> Result<Self> {
let vb_b = vb.pp("blocks");
let mut blocks = Vec::with_capacity(depth);
for index in 0..depth {
let block = TinyViTBlock::new(
dim,
input_resolution,
num_heads,
window_size,
vb_b.pp(index),
)?;
blocks.push(block)
}
let downsample = if downsample {
let downsample = PatchMerging::new(input_resolution, dim, out, vb.pp("downsample"))?;
Some(downsample)
} else {
None
};
let span = tracing::span!(tracing::Level::TRACE, "basic-layer");
Ok(Self {
blocks,
downsample,
span,
})
}
}
impl Module for BasicLayer {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let mut xs = xs.clone();
for block in self.blocks.iter() {
xs = block.forward(&xs)?
}
match &self.downsample {
None => Ok(xs),
Some(downsample) => downsample.forward(&xs),
}
}
}
#[derive(Debug)]
pub struct TinyViT {
patch_embed: PatchEmbed,
layer0: ConvLayer,
layers: Vec<BasicLayer>,
// norm_head: candle_nn::LayerNorm,
// head: candle_nn::Linear,
neck_conv1: candle_nn::Conv2d,
neck_ln1: super::LayerNorm2d,
neck_conv2: candle_nn::Conv2d,
neck_ln2: super::LayerNorm2d,
span: tracing::Span,
span_neck: tracing::Span,
}
impl TinyViT {
pub fn new(
embed_dims: &[usize],
depths: &[usize],
num_heads: &[usize],
window_sizes: &[usize],
_num_classes: usize,
vb: VarBuilder,
) -> Result<Self> {
let patch_embed = PatchEmbed::new(IN_CHANNELS, embed_dims[0], vb.pp("patch_embed"))?;
let patches_resolution = IMG_SIZE / 4;
let vb_l = vb.pp("layers");
let layer0 = ConvLayer::new(
/* dim */ embed_dims[0],
/* out */ embed_dims[1],
/* input_resolution */ (patches_resolution, patches_resolution),
/* depth */ depths[0],
/* downsample */ true,
/* conv_expand_ratio */ MBCONV_EXPAND_RATIO,
vb_l.pp(0),
)?;
let num_layers = embed_dims.len();
let mut layers = Vec::with_capacity(num_layers - 1);
for i_layer in 1..num_layers {
let patches_resolution = patches_resolution / (1 << usize::min(i_layer, 2));
let layer = BasicLayer::new(
/* dim */ embed_dims[i_layer],
/* input_resolution */ (patches_resolution, patches_resolution),
/* depth */ depths[i_layer],
/* num_heads */ num_heads[i_layer],
/* window_size */ window_sizes[i_layer],
/* downsample */ i_layer < num_layers - 1,
/* out */ embed_dims[usize::min(i_layer + 1, num_layers - 1)],
vb_l.pp(i_layer),
)?;
layers.push(layer)
}
let last_embed_dim = embed_dims[embed_dims.len() - 1];
// let norm_head = candle_nn::layer_norm(last_embed_dim, 1e-5, vb.pp("norm_head"))?;
// let head = candle_nn::linear(last_embed_dim, num_classes, vb.pp("head"))?;
let neck_conv1 =
candle_nn::conv2d_no_bias(last_embed_dim, 256, 1, Default::default(), vb.pp("neck.0"))?;
let neck_ln1 = super::LayerNorm2d::new(256, 1e-6, vb.pp("neck.1"))?;
let cfg = candle_nn::Conv2dConfig {
padding: 1,
..Default::default()
};
let neck_conv2 = candle_nn::conv2d_no_bias(256, 256, 3, cfg, vb.pp("neck.2"))?;
let neck_ln2 = super::LayerNorm2d::new(256, 1e-6, vb.pp("neck.3"))?;
let span = tracing::span!(tracing::Level::TRACE, "tiny-vit");
let span_neck = tracing::span!(tracing::Level::TRACE, "neck");
Ok(Self {
patch_embed,
layer0,
layers,
neck_conv1,
neck_ln1,
neck_conv2,
neck_ln2,
span,
span_neck,
})
}
}
impl Module for TinyViT {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let xs = self.patch_embed.forward(xs)?;
let mut xs = self.layer0.forward(&xs)?;
for layer in self.layers.iter() {
xs = layer.forward(&xs)?
}
let (b, _, c) = xs.dims3()?;
let _enter = self.span_neck.enter();
xs.reshape((b, 64, 64, c))?
.permute((0, 3, 1, 2))?
.apply(&self.neck_conv1)?
.apply(&self.neck_ln1)?
.apply(&self.neck_conv2)?
.apply(&self.neck_ln2)
}
}
pub fn tiny_vit_5m(vb: VarBuilder) -> Result<TinyViT> {
TinyViT::new(
/* embed_dims */ &[64, 128, 160, 320],
/* depths */ &[2, 2, 6, 2],
/* num_heads */ &[2, 4, 5, 10],
/* window_sizes */ &[7, 7, 14, 7],
/* num_classes */ 1000,
vb,
)
}
// ===== end of file: candle/candle-transformers/src/models/segment_anything/tiny_vit.rs =====
#![allow(unused)]
use candle::{DType, Device, Module, Result, Tensor, D};
use candle_nn::{layer_norm, linear_b, LayerNorm, Linear, VarBuilder};
use std::sync::Arc;
#[derive(Debug, Clone, serde::Deserialize)]
pub struct Config {
vocab_size: usize,
hidden_size: usize,
intermediate_size: usize,
num_hidden_layers: usize,
num_attention_heads: usize,
num_key_value_heads: usize,
hidden_act: candle_nn::Activation,
max_position_embeddings: usize,
norm_epsilon: f64,
rope_theta: f64,
use_bias: bool,
sliding_window: Option<usize>,
}
#[derive(Debug, Clone)]
struct RotaryEmbedding {
sin: Tensor,
cos: Tensor,
}
fn rotate_half(xs: &Tensor) -> Result<Tensor> {
let last_dim = xs.dim(D::Minus1)?;
let xs1 = xs.narrow(D::Minus1, 0, last_dim / 2)?;
let xs2 = xs.narrow(D::Minus1, last_dim / 2, last_dim - last_dim / 2)?;
Tensor::cat(&[&xs2.neg()?, &xs1], D::Minus1)
}
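// Small illustrative check, not part of the upstream file: `rotate_half` moves the negated
// second half of the last dimension in front of the first half, so [1, 2, 3, 4] becomes
// [-3, -4, 1, 2]. The test module is a hypothetical addition.
#[cfg(test)]
mod rotate_half_example {
    use super::rotate_half;
    use candle::{Device, Tensor};

    #[test]
    fn rotates_the_last_dimension() -> candle::Result<()> {
        let xs = Tensor::new(&[1f32, 2., 3., 4.], &Device::Cpu)?;
        let ys = rotate_half(&xs)?;
        assert_eq!(ys.to_vec1::<f32>()?, vec![-3., -4., 1., 2.]);
        Ok(())
    }
}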
impl RotaryEmbedding {
fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> {
let dim = cfg.hidden_size / cfg.num_attention_heads;
let max_seq_len = cfg.max_position_embeddings;
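        // Standard RoPE inverse frequencies: the k-th pair of channels rotates with
        // frequency 1 / rope_theta^(2k / head_dim).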
let inv_freq: Vec<_> = (0..dim)
.step_by(2)
.map(|i| 1f32 / cfg.rope_theta.powf(i as f64 / dim as f64) as f32)
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?;
let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
.to_dtype(dtype)?
.reshape((max_seq_len, 1))?;
let freqs = t.matmul(&inv_freq)?;
let freqs = Tensor::cat(&[&freqs, &freqs], D::Minus1)?;
Ok(Self {
sin: freqs.sin()?,
cos: freqs.cos()?,
})
}
fn apply_rotary_emb_qkv(
&self,
q: &Tensor,
k: &Tensor,
seqlen_offset: usize,
) -> Result<(Tensor, Tensor)> {
let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?;
let cos = self.cos.narrow(0, seqlen_offset, seq_len)?;
let sin = self.sin.narrow(0, seqlen_offset, seq_len)?;
let cos = cos.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, seq_len, dim)
let sin = sin.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, seq_len, dim)
let q_embed = (q.broadcast_mul(&cos)? + rotate_half(q)?.broadcast_mul(&sin))?;
let k_embed = (k.broadcast_mul(&cos)? + rotate_half(k)?.broadcast_mul(&sin))?;
Ok((q_embed, k_embed))
}
}
#[derive(Debug, Clone)]
#[allow(clippy::upper_case_acronyms)]
struct MLP {
c_fc: Linear,
c_proj: Linear,
act: candle_nn::Activation,
}
impl MLP {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let (h_size, i_size) = (cfg.hidden_size, cfg.intermediate_size);
let c_fc = linear_b(h_size, i_size, cfg.use_bias, vb.pp("c_fc"))?;
let c_proj = linear_b(i_size, h_size, cfg.use_bias, vb.pp("c_proj"))?;
Ok(Self {
c_fc,
c_proj,
act: cfg.hidden_act,
})
}
}
impl Module for MLP {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.c_fc)?.apply(&self.act)?.apply(&self.c_proj)
}
}
#[derive(Debug, Clone)]
struct Attention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
o_proj: Linear,
num_heads: usize,
num_kv_heads: usize,
num_kv_groups: usize,
head_dim: usize,
hidden_size: usize,
rotary_emb: Arc<RotaryEmbedding>,
kv_cache: Option<(Tensor, Tensor)>,
}
impl Attention {
fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_sz = cfg.hidden_size;
let num_heads = cfg.num_attention_heads;
let num_kv_heads = cfg.num_key_value_heads;
let num_kv_groups = num_heads / num_kv_heads;
let head_dim = hidden_sz / num_heads;
let b = cfg.use_bias;
let q_proj = linear_b(hidden_sz, num_heads * head_dim, b, vb.pp("q_proj"))?;
let k_proj = linear_b(hidden_sz, num_kv_heads * head_dim, b, vb.pp("k_proj"))?;
let v_proj = linear_b(hidden_sz, num_kv_heads * head_dim, b, vb.pp("v_proj"))?;
let o_proj = linear_b(num_heads * head_dim, hidden_sz, b, vb.pp("o_proj"))?;
Ok(Self {
q_proj,
k_proj,
v_proj,
o_proj,
num_heads,
num_kv_heads,
num_kv_groups,
head_dim,
hidden_size: hidden_sz,
rotary_emb,
kv_cache: None,
})
}
fn repeat_kv(&self, xs: Tensor) -> Result<Tensor> {
let n_rep = self.num_kv_groups;
if n_rep == 1 {
Ok(xs)
} else {
let (b_sz, num_kv_heads, seq_len, head_dim) = xs.dims4()?;
xs.unsqueeze(2)?
.expand((b_sz, num_kv_heads, n_rep, seq_len, head_dim))?
.reshape((b_sz, num_kv_heads * n_rep, seq_len, head_dim))
}
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let (b_sz, q_len, _) = xs.dims3()?;
let query_states = self.q_proj.forward(xs)?;
let key_states = self.k_proj.forward(xs)?;
let value_states = self.v_proj.forward(xs)?;
let query_states = query_states
.reshape((b_sz, q_len, self.num_heads, self.head_dim))?
.transpose(1, 2)?;
let key_states = key_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let value_states = value_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let (query_states, key_states) =
self.rotary_emb
.apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?;
let (key_states, value_states) = match &self.kv_cache {
None => (key_states, value_states),
Some((prev_k, prev_v)) => {
let key_states = Tensor::cat(&[prev_k, &key_states], 2)?;
let value_states = Tensor::cat(&[prev_v, &value_states], 2)?;
(key_states, value_states)
}
};
self.kv_cache = Some((key_states.clone(), value_states.clone()));
let key_states = self.repeat_kv(key_states)?;
let value_states = self.repeat_kv(value_states)?;
let scale = 1f64 / f64::sqrt(self.head_dim as f64);
let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?;
let attn_weights = match attention_mask {
None => attn_weights,
Some(mask) => attn_weights.broadcast_add(mask)?,
};
let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
let attn_output = attn_weights.matmul(&value_states)?;
attn_output
.transpose(1, 2)?
.reshape((b_sz, q_len, self.hidden_size))?
.apply(&self.o_proj)
}
fn clear_kv_cache(&mut self) {
self.kv_cache = None
}
}
#[derive(Debug, Clone)]
struct DecoderLayer {
self_attn: Attention,
mlp: MLP,
input_layernorm: LayerNorm,
post_attention_layernorm: LayerNorm,
}
impl DecoderLayer {
fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?;
let mlp = MLP::new(cfg, vb.pp("mlp"))?;
let input_layernorm =
layer_norm(cfg.hidden_size, cfg.norm_epsilon, vb.pp("input_layernorm"))?;
let post_attention_layernorm = layer_norm(
cfg.hidden_size,
cfg.norm_epsilon,
vb.pp("post_attention_layernorm"),
)?;
Ok(Self {
self_attn,
mlp,
input_layernorm,
post_attention_layernorm,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let residual = xs;
let xs = self.input_layernorm.forward(xs)?;
let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?;
let xs = (xs + residual)?;
let residual = &xs;
let xs = xs.apply(&self.post_attention_layernorm)?.apply(&self.mlp)?;
residual + xs
}
fn clear_kv_cache(&mut self) {
self.self_attn.clear_kv_cache()
}
}
#[derive(Debug, Clone)]
pub struct Model {
embed_tokens: candle_nn::Embedding,
layers: Vec<DecoderLayer>,
norm: LayerNorm,
lm_head: Linear,
sliding_window: Option<usize>,
device: Device,
dtype: DType,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb_m = vb.pp("model");
let embed_tokens =
candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?;
let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?);
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
let vb_l = vb_m.pp("layers");
for layer_idx in 0..cfg.num_hidden_layers {
let layer = DecoderLayer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?;
layers.push(layer)
}
let norm = layer_norm(cfg.hidden_size, cfg.norm_epsilon, vb_m.pp("norm"))?;
let lm_head = candle_nn::Linear::new(embed_tokens.embeddings().clone(), None);
Ok(Self {
embed_tokens,
layers,
norm,
lm_head,
sliding_window: cfg.sliding_window,
device: vb.device().clone(),
dtype: vb.dtype(),
})
}
fn prepare_decoder_attention_mask(
&self,
b_size: usize,
tgt_len: usize,
seqlen_offset: usize,
) -> Result<Tensor> {
let sliding_window = self.sliding_window.unwrap_or(tgt_len + 42);
let mask: Vec<_> = (0..tgt_len)
.flat_map(|i| {
(0..tgt_len).map(move |j| {
if i < j || j + sliding_window < i {
f32::NEG_INFINITY
} else {
0.
}
})
})
.collect();
let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?;
let mask = if seqlen_offset > 0 {
let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?;
Tensor::cat(&[&mask0, &mask], D::Minus1)?
} else {
mask
};
mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))?
.to_dtype(self.dtype)
}
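    // Worked example, not part of the upstream file: the mask above is both causal and
    // limited to a sliding window. For tgt_len = 4, seqlen_offset = 0 and a hypothetical
    // sliding_window of 2, position i may attend to j only when j <= i and i - j <= 2:
    //   row 0: [   0, -inf, -inf, -inf]
    //   row 1: [   0,    0, -inf, -inf]
    //   row 2: [   0,    0,    0, -inf]
    //   row 3: [-inf,    0,    0,    0]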
pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
let (b_size, seq_len) = input_ids.dims2()?;
let attention_mask = if seq_len <= 1 {
None
} else {
let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?;
Some(mask)
};
let mut xs = self.embed_tokens.forward(input_ids)?;
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)?
}
xs.narrow(1, seq_len - 1, 1)?
.apply(&self.norm)?
.apply(&self.lm_head)
}
pub fn clear_kv_cache(&mut self) {
for layer in self.layers.iter_mut() {
layer.clear_kv_cache()
}
}
}
// ===== end of file: candle/candle-transformers/src/models/starcoder2.rs =====
use super::common::{AttnBlock, ResBlock, TimestepBlock};
use candle::{DType, Result, Tensor, D};
use candle_nn::VarBuilder;
#[derive(Debug)]
struct Block {
res_block: ResBlock,
ts_block: TimestepBlock,
attn_block: AttnBlock,
}
#[derive(Debug)]
pub struct WPrior {
projection: candle_nn::Conv2d,
cond_mapper_lin1: candle_nn::Linear,
cond_mapper_lin2: candle_nn::Linear,
blocks: Vec<Block>,
out_ln: super::common::WLayerNorm,
out_conv: candle_nn::Conv2d,
c_r: usize,
}
impl WPrior {
#[allow(clippy::too_many_arguments)]
pub fn new(
c_in: usize,
c: usize,
c_cond: usize,
c_r: usize,
depth: usize,
nhead: usize,
use_flash_attn: bool,
vb: VarBuilder,
) -> Result<Self> {
let projection = candle_nn::conv2d(c_in, c, 1, Default::default(), vb.pp("projection"))?;
let cond_mapper_lin1 = candle_nn::linear(c_cond, c, vb.pp("cond_mapper.0"))?;
let cond_mapper_lin2 = candle_nn::linear(c, c, vb.pp("cond_mapper.2"))?;
let out_ln = super::common::WLayerNorm::new(c)?;
let out_conv = candle_nn::conv2d(c, c_in * 2, 1, Default::default(), vb.pp("out.1"))?;
let mut blocks = Vec::with_capacity(depth);
for index in 0..depth {
let res_block = ResBlock::new(c, 0, 3, vb.pp(format!("blocks.{}", 3 * index)))?;
let ts_block = TimestepBlock::new(c, c_r, vb.pp(format!("blocks.{}", 3 * index + 1)))?;
let attn_block = AttnBlock::new(
c,
c,
nhead,
true,
use_flash_attn,
vb.pp(format!("blocks.{}", 3 * index + 2)),
)?;
blocks.push(Block {
res_block,
ts_block,
attn_block,
})
}
Ok(Self {
projection,
cond_mapper_lin1,
cond_mapper_lin2,
blocks,
out_ln,
out_conv,
c_r,
})
}
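    // Sinusoidal embedding of the noise/timestep ratio `r`, in the style of transformer positional embeddings.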
pub fn gen_r_embedding(&self, r: &Tensor) -> Result<Tensor> {
const MAX_POSITIONS: usize = 10000;
let r = (r * MAX_POSITIONS as f64)?;
let half_dim = self.c_r / 2;
let emb = (MAX_POSITIONS as f64).ln() / (half_dim - 1) as f64;
let emb = (Tensor::arange(0u32, half_dim as u32, r.device())?.to_dtype(DType::F32)?
* -emb)?
.exp()?;
let emb = r.unsqueeze(1)?.broadcast_mul(&emb.unsqueeze(0)?)?;
let emb = Tensor::cat(&[emb.sin()?, emb.cos()?], 1)?;
let emb = if self.c_r % 2 == 1 {
emb.pad_with_zeros(D::Minus1, 0, 1)?
} else {
emb
};
emb.to_dtype(r.dtype())
}
pub fn forward(&self, xs: &Tensor, r: &Tensor, c: &Tensor) -> Result<Tensor> {
let x_in = xs;
let mut xs = xs.apply(&self.projection)?;
let c_embed = c
.apply(&self.cond_mapper_lin1)?
.apply(&|xs: &_| candle_nn::ops::leaky_relu(xs, 0.2))?
.apply(&self.cond_mapper_lin2)?;
let r_embed = self.gen_r_embedding(r)?;
for block in self.blocks.iter() {
xs = block.res_block.forward(&xs, None)?;
xs = block.ts_block.forward(&xs, &r_embed)?;
xs = block.attn_block.forward(&xs, &c_embed)?;
}
let ab = xs.apply(&self.out_ln)?.apply(&self.out_conv)?.chunk(2, 1)?;
(x_in - &ab[0])? / ((&ab[1] - 1.)?.abs()? + 1e-5)
}
}
| candle/candle-transformers/src/models/wuerstchen/prior.rs/0 | {
"file_path": "candle/candle-transformers/src/models/wuerstchen/prior.rs",
"repo_id": "candle",
"token_count": 1920
} | 43 |
export async function getEmbeddings(
worker,
weightsURL,
tokenizerURL,
configURL,
modelID,
sentences,
updateStatus = null
) {
return new Promise((resolve, reject) => {
worker.postMessage({
weightsURL,
tokenizerURL,
configURL,
modelID,
sentences,
});
function messageHandler(event) {
if ("error" in event.data) {
worker.removeEventListener("message", messageHandler);
reject(new Error(event.data.error));
}
if (event.data.status === "complete") {
worker.removeEventListener("message", messageHandler);
resolve(event.data);
}
if (updateStatus) updateStatus(event.data);
}
worker.addEventListener("message", messageHandler);
});
}
const MODELS = {
intfloat_e5_small_v2: {
base_url: "https://huggingface.co/intfloat/e5-small-v2/resolve/main/",
search_prefix: "query: ",
document_prefix: "passage: ",
},
intfloat_e5_base_v2: {
base_url: "https://huggingface.co/intfloat/e5-base-v2/resolve/main/",
search_prefix: "query: ",
    document_prefix: "passage: ",
},
intfloat_multilingual_e5_small: {
base_url:
"https://huggingface.co/intfloat/multilingual-e5-small/resolve/main/",
search_prefix: "query: ",
document_prefix: "passage: ",
},
sentence_transformers_all_MiniLM_L6_v2: {
base_url:
"https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2/resolve/refs%2Fpr%2F21/",
search_prefix: "",
document_prefix: "",
},
sentence_transformers_all_MiniLM_L12_v2: {
base_url:
"https://huggingface.co/sentence-transformers/all-MiniLM-L12-v2/resolve/refs%2Fpr%2F4/",
search_prefix: "",
document_prefix: "",
},
};
export function getModelInfo(id) {
return {
modelURL: MODELS[id].base_url + "model.safetensors",
configURL: MODELS[id].base_url + "config.json",
tokenizerURL: MODELS[id].base_url + "tokenizer.json",
search_prefix: MODELS[id].search_prefix,
document_prefix: MODELS[id].document_prefix,
};
}
export function cosineSimilarity(vec1, vec2) {
const dot = vec1.reduce((acc, val, i) => acc + val * vec2[i], 0);
const a = Math.sqrt(vec1.reduce((acc, val) => acc + val * val, 0));
const b = Math.sqrt(vec2.reduce((acc, val) => acc + val * val, 0));
return dot / (a * b);
}
export async function getWikiText(article) {
// thanks to wikipedia for the API
const URL = `https://en.wikipedia.org/w/api.php?action=query&prop=extracts&exlimit=1&titles=${article}&explaintext=1&exsectionformat=plain&format=json&origin=*`;
return fetch(URL, {
method: "GET",
headers: {
Accept: "application/json",
},
})
.then((r) => r.json())
.then((data) => {
const pages = data.query.pages;
const pageId = Object.keys(pages)[0];
const extract = pages[pageId].extract;
if (extract === undefined || extract === "") {
throw new Error("No article found");
}
return extract;
})
.catch((error) => console.error("Error:", error));
}
| candle/candle-wasm-examples/bert/utils.js/0 | {
"file_path": "candle/candle-wasm-examples/bert/utils.js",
"repo_id": "candle",
"token_count": 1250
} | 44 |
fn main() {
wasm_logger::init(wasm_logger::Config::new(log::Level::Trace));
console_error_panic_hook::set_once();
yew::Renderer::<candle_wasm_example_llama2::App>::new().render();
}
| candle/candle-wasm-examples/llama2-c/src/bin/app.rs/0 | {
"file_path": "candle/candle-wasm-examples/llama2-c/src/bin/app.rs",
"repo_id": "candle",
"token_count": 83
} | 45 |
<html>
<head>
<meta content="text/html;charset=utf-8" http-equiv="Content-Type" />
<title>Candle Segment Anything Model (SAM) Rust/WASM</title>
</head>
<body></body>
</html>
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<style>
@import url("https://fonts.googleapis.com/css2?family=Source+Code+Pro:wght@200;300;400&family=Source+Sans+3:wght@100;200;300;400;500;600;700;800;900&display=swap");
html,
body {
font-family: "Source Sans 3", sans-serif;
}
</style>
<script src="https://cdn.tailwindcss.com"></script>
<script type="module">
      // base URL for the SAM model weights
const MODEL_BASEURL =
"https://huggingface.co/lmz/candle-sam/resolve/main/";
      // available models and their weight files
const MODELS = {
sam_mobile_tiny: {
url: "mobile_sam-tiny-vitt.safetensors",
},
sam_base: {
url: "sam_vit_b_01ec64.safetensors",
},
};
const samWorker = new Worker("./samWorker.js", { type: "module" });
async function segmentPoints(
modelURL, // URL to the weights file
modelID, // model ID
imageURL, // URL to the image file
points // {x, y} points to prompt image
) {
return new Promise((resolve, reject) => {
function messageHandler(event) {
console.log(event.data);
if ("status" in event.data) {
updateStatus(event.data);
}
if ("error" in event.data) {
samWorker.removeEventListener("message", messageHandler);
reject(new Error(event.data.error));
}
if (event.data.status === "complete-embedding") {
samWorker.removeEventListener("message", messageHandler);
resolve();
}
if (event.data.status === "complete") {
samWorker.removeEventListener("message", messageHandler);
resolve(event.data.output);
}
}
samWorker.addEventListener("message", messageHandler);
samWorker.postMessage({
modelURL,
modelID,
imageURL,
points,
});
});
}
function updateStatus(statusMessage) {
        statusOutput.innerText = statusMessage.message;
}
let copyMaskURL = null;
let copyImageURL = null;
const clearBtn = document.querySelector("#clear-btn");
const maskBtn = document.querySelector("#mask-btn");
const undoBtn = document.querySelector("#undo-btn");
const downloadBtn = document.querySelector("#download-btn");
const canvas = document.querySelector("#canvas");
const mask = document.querySelector("#mask");
const ctxCanvas = canvas.getContext("2d");
const ctxMask = mask.getContext("2d");
const fileUpload = document.querySelector("#file-upload");
const dropArea = document.querySelector("#drop-area");
const dropButtons = document.querySelector("#drop-buttons");
const imagesExamples = document.querySelector("#image-select");
const modelSelection = document.querySelector("#model");
const statusOutput = document.querySelector("#output-status");
//add event listener to file input
fileUpload.addEventListener("input", (e) => {
const target = e.target;
if (target.files.length > 0) {
const href = URL.createObjectURL(target.files[0]);
clearImageCanvas();
copyImageURL = href;
drawImageCanvas(href);
setImageEmbeddings(href);
togglePointMode(false);
}
});
// add event listener to drop-area
dropArea.addEventListener("dragenter", (e) => {
e.preventDefault();
dropArea.classList.add("border-blue-700");
});
dropArea.addEventListener("dragleave", (e) => {
e.preventDefault();
dropArea.classList.remove("border-blue-700");
});
dropArea.addEventListener("dragover", (e) => {
e.preventDefault();
});
dropArea.addEventListener("drop", (e) => {
e.preventDefault();
dropArea.classList.remove("border-blue-700");
const url = e.dataTransfer.getData("text/uri-list");
const files = e.dataTransfer.files;
if (files.length > 0) {
const href = URL.createObjectURL(files[0]);
clearImageCanvas();
copyImageURL = href;
drawImageCanvas(href);
setImageEmbeddings(href);
togglePointMode(false);
} else if (url) {
clearImageCanvas();
copyImageURL = url;
drawImageCanvas(url);
setImageEmbeddings(url);
togglePointMode(false);
}
});
let hasImage = false;
let isSegmenting = false;
let isEmbedding = false;
let currentImageURL = "";
let pointArr = [];
let bgPointMode = false;
//add event listener to image examples
imagesExamples.addEventListener("click", (e) => {
if (isEmbedding || isSegmenting) {
return;
}
const target = e.target;
if (target.nodeName === "IMG") {
const href = target.src;
clearImageCanvas();
copyImageURL = href;
drawImageCanvas(href);
setImageEmbeddings(href);
}
});
//add event listener to mask button
maskBtn.addEventListener("click", () => {
togglePointMode();
});
//add event listener to clear button
clearBtn.addEventListener("click", () => {
clearImageCanvas();
togglePointMode(false);
pointArr = [];
});
//add event listener to undo button
undoBtn.addEventListener("click", () => {
undoPoint();
});
// add event to download btn
downloadBtn.addEventListener("click", async () => {
// Function to load image blobs as Image elements asynchronously
const loadImageAsync = (imageURL) => {
return new Promise((resolve) => {
const img = new Image();
img.onload = () => {
resolve(img);
};
img.crossOrigin = "anonymous";
img.src = imageURL;
});
};
const originalImage = await loadImageAsync(copyImageURL);
const maskImage = await loadImageAsync(copyMaskURL);
// create main a board to draw
const canvas = document.createElement("canvas");
const ctx = canvas.getContext("2d");
canvas.width = originalImage.width;
canvas.height = originalImage.height;
// Perform the mask operation
ctx.drawImage(maskImage, 0, 0);
ctx.globalCompositeOperation = "source-in";
ctx.drawImage(originalImage, 0, 0);
// to blob
const blobPromise = new Promise((resolve) => {
canvas.toBlob(resolve);
});
const blob = await blobPromise;
const resultURL = URL.createObjectURL(blob);
// download
const link = document.createElement("a");
link.href = resultURL;
link.download = "cutout.png";
link.click();
});
//add click event to canvas
canvas.addEventListener("click", async (event) => {
if (!hasImage || isEmbedding || isSegmenting) {
return;
}
        const backgroundMode = event.shiftKey ? !bgPointMode : bgPointMode;
const targetBox = event.target.getBoundingClientRect();
const x = (event.clientX - targetBox.left) / targetBox.width;
const y = (event.clientY - targetBox.top) / targetBox.height;
const ptsToRemove = [];
for (const [idx, pts] of pointArr.entries()) {
const d = Math.sqrt((pts[0] - x) ** 2 + (pts[1] - y) ** 2);
if (d < 6 / targetBox.width) {
ptsToRemove.push(idx);
}
}
if (ptsToRemove.length > 0) {
pointArr = pointArr.filter((_, idx) => !ptsToRemove.includes(idx));
} else {
pointArr = [...pointArr, [x, y, !backgroundMode]];
}
undoBtn.disabled = false;
downloadBtn.disabled = false;
if (pointArr.length == 0) {
ctxMask.clearRect(0, 0, canvas.width, canvas.height);
undoBtn.disabled = true;
downloadBtn.disabled = true;
return;
}
isSegmenting = true;
const { maskURL } = await getSegmentationMask(pointArr);
isSegmenting = false;
copyMaskURL = maskURL;
drawMask(maskURL, pointArr);
});
async function undoPoint() {
if (!hasImage || isEmbedding || isSegmenting) {
return;
}
if (pointArr.length === 0) {
return;
}
pointArr.pop();
if (pointArr.length === 0) {
ctxMask.clearRect(0, 0, canvas.width, canvas.height);
undoBtn.disabled = true;
return;
}
isSegmenting = true;
const { maskURL } = await getSegmentationMask(pointArr);
isSegmenting = false;
copyMaskURL = maskURL;
drawMask(maskURL, pointArr);
}
function togglePointMode(mode) {
bgPointMode = mode === undefined ? !bgPointMode : mode;
maskBtn.querySelector("span").innerText = bgPointMode
? "Background Point"
: "Mask Point";
if (bgPointMode) {
maskBtn.querySelector("#mask-circle").setAttribute("hidden", "");
maskBtn.querySelector("#unmask-circle").removeAttribute("hidden");
} else {
maskBtn.querySelector("#mask-circle").removeAttribute("hidden");
maskBtn.querySelector("#unmask-circle").setAttribute("hidden", "");
}
}
async function getSegmentationMask(points) {
const modelID = modelSelection.value;
const modelURL = MODEL_BASEURL + MODELS[modelID].url;
const imageURL = currentImageURL;
const { maskURL } = await segmentPoints(
modelURL,
modelID,
imageURL,
points
);
return { maskURL };
}
async function setImageEmbeddings(imageURL) {
if (isEmbedding) {
return;
}
canvas.classList.remove("cursor-pointer");
canvas.classList.add("cursor-wait");
clearBtn.disabled = true;
const modelID = modelSelection.value;
const modelURL = MODEL_BASEURL + MODELS[modelID].url;
isEmbedding = true;
await segmentPoints(modelURL, modelID, imageURL);
canvas.classList.remove("cursor-wait");
canvas.classList.add("cursor-pointer");
clearBtn.disabled = false;
isEmbedding = false;
currentImageURL = imageURL;
}
function clearImageCanvas() {
ctxCanvas.clearRect(0, 0, canvas.width, canvas.height);
ctxMask.clearRect(0, 0, canvas.width, canvas.height);
hasImage = false;
isEmbedding = false;
isSegmenting = false;
currentImageURL = "";
pointArr = [];
clearBtn.disabled = true;
canvas.parentElement.style.height = "auto";
dropButtons.classList.remove("invisible");
}
function drawMask(maskURL, points) {
if (!maskURL) {
throw new Error("No mask URL provided");
}
const img = new Image();
img.crossOrigin = "anonymous";
img.onload = () => {
mask.width = canvas.width;
mask.height = canvas.height;
ctxMask.save();
ctxMask.drawImage(canvas, 0, 0);
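        // "source-atop" keeps the red tint only where the canvas already has content,
        // then "destination-in" clips the result to the mask image.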
ctxMask.globalCompositeOperation = "source-atop";
ctxMask.fillStyle = "rgba(255, 0, 0, 0.6)";
ctxMask.fillRect(0, 0, canvas.width, canvas.height);
ctxMask.globalCompositeOperation = "destination-in";
ctxMask.drawImage(img, 0, 0);
ctxMask.globalCompositeOperation = "source-over";
for (const pt of points) {
if (pt[2]) {
ctxMask.fillStyle = "rgba(0, 255, 255, 1)";
} else {
ctxMask.fillStyle = "rgba(255, 255, 0, 1)";
}
ctxMask.beginPath();
ctxMask.arc(
pt[0] * canvas.width,
pt[1] * canvas.height,
3,
0,
2 * Math.PI
);
ctxMask.fill();
}
ctxMask.restore();
};
img.src = maskURL;
}
function drawImageCanvas(imgURL) {
if (!imgURL) {
throw new Error("No image URL provided");
}
      ctxCanvas.clearRect(0, 0, canvas.width, canvas.height);
const img = new Image();
img.crossOrigin = "anonymous";
img.onload = () => {
canvas.width = img.width;
canvas.height = img.height;
ctxCanvas.drawImage(img, 0, 0);
canvas.parentElement.style.height = canvas.offsetHeight + "px";
hasImage = true;
clearBtn.disabled = false;
dropButtons.classList.add("invisible");
};
img.src = imgURL;
}
const observer = new ResizeObserver((entries) => {
for (let entry of entries) {
if (entry.target === canvas) {
canvas.parentElement.style.height = canvas.offsetHeight + "px";
}
}
});
observer.observe(canvas);
</script>
</head>
<body class="container max-w-4xl mx-auto p-4">
<main class="grid grid-cols-1 gap-8 relative">
<span class="absolute text-5xl -ml-[1em]">🕯️</span>
<div>
<h1 class="text-5xl font-bold">Candle Segment Anything</h1>
<h2 class="text-2xl font-bold">Rust/WASM Demo</h2>
<p class="max-w-lg">
Zero-shot image segmentation with
<a
href="https://segment-anything.com"
class="underline hover:text-blue-500 hover:no-underline"
target="_blank"
>Segment Anything Model (SAM)</a
>
and
<a
href="https://github.com/ChaoningZhang/MobileSAM"
class="underline hover:text-blue-500 hover:no-underline"
target="_blank"
>MobileSAM </a
>. It runs in the browser with a WASM runtime built with
<a
href="https://github.com/huggingface/candle/"
target="_blank"
class="underline hover:text-blue-500 hover:no-underline"
>Candle
</a>
</p>
</div>
<div>
<label for="model" class="font-medium">Models Options: </label>
<select
id="model"
class="border-2 border-gray-500 rounded-md font-light">
<option value="sam_mobile_tiny" selected>
Mobile SAM Tiny (40.6 MB)
</option>
<option value="sam_base">SAM Base (375 MB)</option>
</select>
</div>
<div>
<p class="text-xs italic max-w-lg">
<b>Note:</b>
The model's first run may take a few seconds as it loads and caches
the model in the browser, and then creates the image embeddings. Any
subsequent clicks on points will be significantly faster.
</p>
</div>
<div class="relative max-w-2xl">
<div class="flex justify-between items-center">
<div class="px-2 rounded-md inline text-xs">
<span id="output-status" class="m-auto font-light"></span>
</div>
<div class="flex gap-2">
<button
id="mask-btn"
title="Toggle Mask Point and Background Point"
class="text-xs bg-white rounded-md disabled:opacity-50 flex gap-1 items-center">
<span>Mask Point</span>
<svg
xmlns="http://www.w3.org/2000/svg"
height="1em"
viewBox="0 0 512 512">
<path
id="mask-circle"
d="M256 512a256 256 0 1 0 0-512 256 256 0 1 0 0 512z" />
<path
id="unmask-circle"
hidden
d="M464 256a208 208 0 1 0-416 0 208 208 0 1 0 416 0zM0 256a256 256 0 1 1 512 0 256 256 0 1 1-512 0z" />
</svg>
</button>
<button
id="undo-btn"
disabled
title="Undo Last Point"
class="text-xs bg-white rounded-md disabled:opacity-50 flex gap-1 items-center">
<svg
xmlns="http://www.w3.org/2000/svg"
height="1em"
viewBox="0 0 512 512">
<path
d="M48.5 224H40a24 24 0 0 1-24-24V72a24 24 0 0 1 41-17l41.6 41.6a224 224 0 1 1-1 317.8 32 32 0 0 1 45.3-45.3 160 160 0 1 0 1-227.3L185 183a24 24 0 0 1-17 41H48.5z" />
</svg>
</button>
<button
id="clear-btn"
disabled
title="Clear Image"
class="text-xs bg-white rounded-md disabled:opacity-50 flex gap-1 items-center">
<svg
class=""
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 13 12"
height="1em">
<path
d="M1.6.7 12 11.1M12 .7 1.6 11.1"
stroke="#2E3036"
stroke-width="2" />
</svg>
</button>
</div>
</div>
<div
id="drop-area"
class="flex flex-col items-center justify-center border-2 border-gray-300 border-dashed rounded-xl relative p-20 w-full overflow-hidden">
<div
id="drop-buttons"
class="flex flex-col items-center justify-center space-y-1 text-center relative z-10">
<svg
width="25"
height="25"
viewBox="0 0 25 25"
fill="none"
xmlns="http://www.w3.org/2000/svg">
<path
d="M3.5 24.3a3 3 0 0 1-1.9-.8c-.5-.5-.8-1.2-.8-1.9V2.9c0-.7.3-1.3.8-1.9.6-.5 1.2-.7 2-.7h18.6c.7 0 1.3.2 1.9.7.5.6.7 1.2.7 2v18.6c0 .7-.2 1.4-.7 1.9a3 3 0 0 1-2 .8H3.6Zm0-2.7h18.7V2.9H3.5v18.7Zm2.7-2.7h13.3c.3 0 .5 0 .6-.3v-.7l-3.7-5a.6.6 0 0 0-.6-.2c-.2 0-.4 0-.5.3l-3.5 4.6-2.4-3.3a.6.6 0 0 0-.6-.3c-.2 0-.4.1-.5.3l-2.7 3.6c-.1.2-.2.4 0 .7.1.2.3.3.6.3Z"
fill="#000" />
</svg>
<div class="flex text-sm text-gray-600">
<label
for="file-upload"
class="relative cursor-pointer bg-white rounded-md font-medium text-blue-950 hover:text-blue-700">
<span>Drag and drop your image here</span>
<span class="block text-xs">or</span>
<span class="block text-xs">Click to upload</span>
</label>
</div>
<input
id="file-upload"
name="file-upload"
type="file"
class="sr-only" />
</div>
<canvas id="canvas" class="absolute w-full"></canvas>
<canvas
id="mask"
class="pointer-events-none absolute w-full"></canvas>
</div>
<div class="text-right py-2">
<button
id="share-btn"
class="bg-white rounded-md hover:outline outline-orange-200 disabled:opacity-50 invisible">
<img
src="https://huggingface.co/datasets/huggingface/badges/raw/main/share-to-community-sm.svg" />
</button>
<button
id="download-btn"
title="Copy result (.png)"
disabled
class="p-1 px-2 text-xs font-medium bg-white rounded-2xl outline outline-gray-200 hover:outline-orange-200 disabled:opacity-50"
>
Download Cut-Out
</button>
</div>
</div>
<div>
<div
class="flex gap-3 items-center overflow-x-scroll"
id="image-select">
<h3 class="font-medium">Examples:</h3>
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/sf.jpg"
class="cursor-pointer w-24 h-24 object-cover" />
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/bike.jpeg"
class="cursor-pointer w-24 h-24 object-cover" />
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/000000000077.jpg"
class="cursor-pointer w-24 h-24 object-cover" />
</div>
</div>
</main>
</body>
</html>
| candle/candle-wasm-examples/segment-anything/lib-example.html/0 | {
"file_path": "candle/candle-wasm-examples/segment-anything/lib-example.html",
"repo_id": "candle",
"token_count": 10333
} | 46 |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<title>Welcome to Candle!</title>
<link data-trunk rel="copy-file" href="yolov8s.safetensors" />
<link data-trunk rel="copy-file" href="bike.jpeg" />
<link data-trunk rel="rust" href="Cargo.toml" data-bin="app" data-type="main" />
<link data-trunk rel="rust" href="Cargo.toml" data-bin="worker" data-type="worker" />
<link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,300italic,700,700italic">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/milligram/1.4.1/milligram.css">
</head>
<body></body>
</html>
| candle/candle-wasm-examples/yolo/index.html/0 | {
"file_path": "candle/candle-wasm-examples/yolo/index.html",
"repo_id": "candle",
"token_count": 322
} | 47 |
backend-test:J
xytest"Relu
SingleReluZ
x
b
y
B | candle/test.onnx/0 | {
"file_path": "candle/test.onnx",
"repo_id": "candle",
"token_count": 76
} | 48 |
ENV_LOCAL_PATH=/app/.env.local
if test -z "${DOTENV_LOCAL}" ; then
if ! test -f "${ENV_LOCAL_PATH}" ; then
echo "DOTENV_LOCAL was not found in the ENV variables and .env.local is not set using a bind volume. We are using the default .env config."
fi;
else
echo "DOTENV_LOCAL was found in the ENV variables. Creating .env.local file."
cat <<< "$DOTENV_LOCAL" > ${ENV_LOCAL_PATH}
fi;
if [ "$INCLUDE_DB" = "true" ] ; then
echo "INCLUDE_DB is set to true."
MONGODB_CONFIG="MONGODB_URL=mongodb://localhost:27017"
if ! grep -q "^${MONGODB_CONFIG}$" ${ENV_LOCAL_PATH}; then
echo "Appending MONGODB_URL"
touch /app/.env.local
echo -e "\n${MONGODB_CONFIG}" >> ${ENV_LOCAL_PATH}
fi
mkdir -p /data/db
mongod &
echo "Starting local MongoDB instance"
fi;
npm run build
npm run preview -- --host 0.0.0.0 --port 3000 | chat-ui/entrypoint.sh/0 | {
"file_path": "chat-ui/entrypoint.sh",
"repo_id": "chat-ui",
"token_count": 385
} | 49 |
<script lang="ts">
import { afterUpdate } from "svelte";
import CopyToClipBoardBtn from "./CopyToClipBoardBtn.svelte";
export let code = "";
export let lang = "";
$: highlightedCode = "";
afterUpdate(async () => {
const { default: hljs } = await import("highlight.js");
const language = hljs.getLanguage(lang);
highlightedCode = hljs.highlightAuto(code, language?.aliases).value;
});
</script>
<div class="group relative my-4 rounded-lg">
<!-- eslint-disable svelte/no-at-html-tags -->
<pre
class="scrollbar-custom overflow-auto px-5 scrollbar-thumb-gray-500 hover:scrollbar-thumb-gray-400 dark:scrollbar-thumb-white/10 dark:hover:scrollbar-thumb-white/20"><code
		class="language-{lang}">{@html highlightedCode || code.replaceAll("<", "&lt;")}</code
></pre>
<CopyToClipBoardBtn
classNames="absolute top-2 right-2 invisible opacity-0 group-hover:visible group-hover:opacity-100"
value={code}
/>
</div>
| chat-ui/src/lib/components/CodeBlock.svelte/0 | {
"file_path": "chat-ui/src/lib/components/CodeBlock.svelte",
"repo_id": "chat-ui",
"token_count": 345
} | 50 |
<script lang="ts">
import { fade } from "svelte/transition";
import { onDestroy } from "svelte";
import IconChevron from "./icons/IconChevron.svelte";
export let scrollNode: HTMLElement;
export { className as class };
let visible = false;
let className = "";
let observer: ResizeObserver | null = null;
$: if (scrollNode) {
destroy();
if (window.ResizeObserver) {
observer = new ResizeObserver(() => {
updateVisibility();
});
observer.observe(scrollNode);
}
scrollNode.addEventListener("scroll", updateVisibility);
}
function updateVisibility() {
if (!scrollNode) return;
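		// Show the button once the user has scrolled more than ~200px away from the bottom.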
visible =
Math.ceil(scrollNode.scrollTop) + 200 < scrollNode.scrollHeight - scrollNode.clientHeight;
}
function destroy() {
observer?.disconnect();
scrollNode?.removeEventListener("scroll", updateVisibility);
}
onDestroy(destroy);
</script>
{#if visible}
<button
transition:fade={{ duration: 150 }}
on:click={() => scrollNode.scrollTo({ top: scrollNode.scrollHeight, behavior: "smooth" })}
class="btn absolute flex h-[41px] w-[41px] rounded-full border bg-white shadow-md transition-all hover:bg-gray-100 dark:border-gray-600 dark:bg-gray-700 dark:shadow-gray-950 dark:hover:bg-gray-600 {className}"
><IconChevron classNames="mt-[2px]" /></button
>
{/if}
| chat-ui/src/lib/components/ScrollToBottomBtn.svelte/0 | {
"file_path": "chat-ui/src/lib/components/ScrollToBottomBtn.svelte",
"repo_id": "chat-ui",
"token_count": 460
} | 51 |
<script lang="ts">
export let classNames = "";
</script>
<svg
xmlns="http://www.w3.org/2000/svg"
width="1em"
height="1em"
class={classNames}
fill="none"
viewBox="0 0 26 23"
>
<path
fill="url(#a)"
d="M.93 10.65A10.17 10.17 0 0 1 11.11.48h4.67a9.45 9.45 0 0 1 0 18.89H4.53L1.62 22.2a.38.38 0 0 1-.69-.28V10.65Z"
/>
<path
fill="#000"
fill-rule="evenodd"
d="M11.52 7.4a1.86 1.86 0 1 1-3.72 0 1.86 1.86 0 0 1 3.72 0Zm7.57 0a1.86 1.86 0 1 1-3.73 0 1.86 1.86 0 0 1 3.73 0ZM8.9 12.9a.55.55 0 0 0-.11.35.76.76 0 0 1-1.51 0c0-.95.67-1.94 1.76-1.94 1.09 0 1.76 1 1.76 1.94H9.3a.55.55 0 0 0-.12-.35c-.06-.07-.1-.08-.13-.08s-.08 0-.14.08Zm4.04 0a.55.55 0 0 0-.12.35h-1.51c0-.95.68-1.94 1.76-1.94 1.1 0 1.77 1 1.77 1.94h-1.51a.55.55 0 0 0-.12-.35c-.06-.07-.11-.08-.14-.08-.02 0-.07 0-.13.08Zm-1.89.79c-.02 0-.07-.01-.13-.08a.55.55 0 0 1-.12-.36h-1.5c0 .95.67 1.95 1.75 1.95 1.1 0 1.77-1 1.77-1.95h-1.51c0 .16-.06.28-.12.36-.06.07-.11.08-.14.08Zm4.04 0c-.03 0-.08-.01-.14-.08a.55.55 0 0 1-.12-.36h-1.5c0 .95.67 1.95 1.76 1.95 1.08 0 1.76-1 1.76-1.95h-1.51c0 .16-.06.28-.12.36-.06.07-.11.08-.13.08Zm1.76-.44c0-.16.05-.28.12-.35.06-.07.1-.08.13-.08s.08 0 .14.08c.06.07.11.2.11.35a.76.76 0 0 0 1.51 0c0-.95-.67-1.94-1.76-1.94-1.09 0-1.76 1-1.76 1.94h1.5Z"
clip-rule="evenodd"
/>
<defs>
<radialGradient
id="a"
cx="0"
cy="0"
r="1"
gradientTransform="matrix(0 31.37 -34.85 0 13.08 -9.02)"
gradientUnits="userSpaceOnUse"
>
<stop stop-color="#FFD21E" />
<stop offset="1" stop-color="red" />
</radialGradient>
</defs>
</svg>
| chat-ui/src/lib/components/icons/IconDazzled.svelte/0 | {
"file_path": "chat-ui/src/lib/components/icons/IconDazzled.svelte",
"repo_id": "chat-ui",
"token_count": 916
} | 52 |
import { z } from "zod";
import type { EmbeddingEndpoint, Embedding } from "../embeddingEndpoints";
import { chunk } from "$lib/utils/chunk";
import { OPENAI_API_KEY } from "$env/static/private";
export const embeddingEndpointOpenAIParametersSchema = z.object({
weight: z.number().int().positive().default(1),
model: z.any(),
type: z.literal("openai"),
url: z.string().url().default("https://api.openai.com/v1/embeddings"),
apiKey: z.string().default(OPENAI_API_KEY),
});
export async function embeddingEndpointOpenAI(
input: z.input<typeof embeddingEndpointOpenAIParametersSchema>
): Promise<EmbeddingEndpoint> {
const { url, model, apiKey } = embeddingEndpointOpenAIParametersSchema.parse(input);
const maxBatchSize = model.maxBatchSize || 100;
return async ({ inputs }) => {
const requestURL = new URL(url);
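		// Split the inputs into batches so each request stays within the model's maximum batch size.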
const batchesInputs = chunk(inputs, maxBatchSize);
const batchesResults = await Promise.all(
batchesInputs.map(async (batchInputs) => {
const response = await fetch(requestURL, {
method: "POST",
headers: {
Accept: "application/json",
"Content-Type": "application/json",
...(apiKey ? { Authorization: `Bearer ${apiKey}` } : {}),
},
body: JSON.stringify({ input: batchInputs, model: model.name }),
});
const embeddings: Embedding[] = [];
const responseObject = await response.json();
for (const embeddingObject of responseObject.data) {
embeddings.push(embeddingObject.embedding);
}
return embeddings;
})
);
const flatAllEmbeddings = batchesResults.flat();
return flatAllEmbeddings;
};
}
| chat-ui/src/lib/server/embeddingEndpoints/openai/embeddingEndpoints.ts/0 | {
"file_path": "chat-ui/src/lib/server/embeddingEndpoints/openai/embeddingEndpoints.ts",
"repo_id": "chat-ui",
"token_count": 584
} | 53 |
import { isURLLocal } from "./isURLLocal";
import { describe, expect, it } from "vitest";
describe("isURLLocal", async () => {
it("should return true for localhost", async () => {
expect(await isURLLocal(new URL("http://localhost"))).toBe(true);
});
it("should return true for 127.0.0.1", async () => {
expect(await isURLLocal(new URL("http://127.0.0.1"))).toBe(true);
});
it("should return true for 127.254.254.254", async () => {
expect(await isURLLocal(new URL("http://127.254.254.254"))).toBe(true);
});
it("should return false for huggingface.co", async () => {
expect(await isURLLocal(new URL("https://huggingface.co/"))).toBe(false);
});
it("should return true for 127.0.0.1.nip.io", async () => {
expect(await isURLLocal(new URL("http://127.0.0.1.nip.io"))).toBe(true);
});
it("should fail on ipv6", async () => {
await expect(isURLLocal(new URL("http://[::1]"))).rejects.toThrow();
});
it("should fail on ipv6 --1.sslip.io", async () => {
await expect(isURLLocal(new URL("http://--1.sslip.io"))).rejects.toThrow();
});
it("should fail on invalid domain names", async () => {
await expect(
isURLLocal(new URL("http://34329487239847329874923948732984.com/"))
).rejects.toThrow();
});
});
| chat-ui/src/lib/server/isURLLocal.spec.ts/0 | {
"file_path": "chat-ui/src/lib/server/isURLLocal.spec.ts",
"repo_id": "chat-ui",
"token_count": 492
} | 54 |
import { writable } from "svelte/store";
export const isAborted = writable<boolean>(false);
| chat-ui/src/lib/stores/isAborted.ts/0 | {
"file_path": "chat-ui/src/lib/stores/isAborted.ts",
"repo_id": "chat-ui",
"token_count": 30
} | 55 |
import type { ObjectId } from "mongodb";
import type { User } from "./User";
import type { Assistant } from "./Assistant";
import type { Timestamps } from "./Timestamps";
export interface Report extends Timestamps {
_id: ObjectId;
createdBy: User["_id"] | string;
assistantId: Assistant["_id"];
reason?: string;
}
| chat-ui/src/lib/types/Report.ts/0 | {
"file_path": "chat-ui/src/lib/types/Report.ts",
"repo_id": "chat-ui",
"token_count": 101
} | 56 |
const file2base64 = (file: File): Promise<string> => {
return new Promise<string>((resolve, reject) => {
const reader = new FileReader();
reader.readAsDataURL(file);
reader.onload = () => {
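			// reader.result is a data URL ("data:<mime>;base64,<payload>"); keep only the base64 payload.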
const dataUrl = reader.result as string;
const base64 = dataUrl.split(",")[1];
resolve(base64);
};
reader.onerror = (error) => reject(error);
});
};
export default file2base64;
| chat-ui/src/lib/utils/file2base64.ts/0 | {
"file_path": "chat-ui/src/lib/utils/file2base64.ts",
"repo_id": "chat-ui",
"token_count": 142
} | 57 |
import type { Message } from "$lib/types/Message";
import type { LegacyParamatersTemplateInput } from "$lib/types/Template";
import Handlebars from "handlebars";
Handlebars.registerHelper("ifUser", function (this: Pick<Message, "from" | "content">, options) {
if (this.from == "user") return options.fn(this);
});
Handlebars.registerHelper(
"ifAssistant",
function (this: Pick<Message, "from" | "content">, options) {
if (this.from == "assistant") return options.fn(this);
}
);
export function compileTemplate<T>(input: string, model: LegacyParamatersTemplateInput) {
const template = Handlebars.compile<T & LegacyParamatersTemplateInput>(input, {
knownHelpers: { ifUser: true, ifAssistant: true },
knownHelpersOnly: true,
noEscape: true,
strict: true,
preventIndent: true,
});
return function render(inputs: T, options?: RuntimeOptions) {
return template({ ...model, ...inputs }, options);
};
}
| chat-ui/src/lib/utils/template.ts/0 | {
"file_path": "chat-ui/src/lib/utils/template.ts",
"repo_id": "chat-ui",
"token_count": 290
} | 58 |
<script lang="ts">
import { goto } from "$app/navigation";
import { base } from "$app/paths";
import { PUBLIC_APP_NAME } from "$env/static/public";
import ChatWindow from "$lib/components/chat/ChatWindow.svelte";
import { ERROR_MESSAGES, error } from "$lib/stores/errors";
import { pendingMessage } from "$lib/stores/pendingMessage";
import { useSettingsStore } from "$lib/stores/settings.js";
import { findCurrentModel } from "$lib/utils/models";
export let data;
let loading = false;
let files: File[] = [];
const settings = useSettingsStore();
async function createConversation(message: string) {
try {
loading = true;
// check if $settings.activeModel is a valid model
// else check if it's an assistant, and use that model
// else use the first model
const validModels = data.models.map((model) => model.id);
let model;
if (validModels.includes($settings.activeModel)) {
model = $settings.activeModel;
} else {
if (validModels.includes(data.assistant?.modelId)) {
model = data.assistant?.modelId;
} else {
model = data.models[0].id;
}
}
const res = await fetch(`${base}/conversation`, {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
model,
preprompt: $settings.customPrompts[$settings.activeModel],
assistantId: data.assistant?._id,
}),
});
if (!res.ok) {
const errorMessage = (await res.json()).message || ERROR_MESSAGES.default;
error.set(errorMessage);
console.error("Error while creating conversation: ", errorMessage);
return;
}
const { conversationId } = await res.json();
// Ugly hack to use a store as temp storage, feel free to improve ^^
pendingMessage.set({
content: message,
files,
});
// invalidateAll to update list of conversations
await goto(`${base}/conversation/${conversationId}`, { invalidateAll: true });
} catch (err) {
error.set((err as Error).message || ERROR_MESSAGES.default);
console.error(err);
} finally {
loading = false;
}
}
</script>
<svelte:head>
<title>{PUBLIC_APP_NAME}</title>
</svelte:head>
<ChatWindow
on:message={(ev) => createConversation(ev.detail)}
{loading}
assistant={data.assistant}
currentModel={findCurrentModel([...data.models, ...data.oldModels], $settings.activeModel)}
models={data.models}
bind:files
/>
| chat-ui/src/routes/+page.svelte/0 | {
"file_path": "chat-ui/src/routes/+page.svelte",
"repo_id": "chat-ui",
"token_count": 896
} | 59 |
import { MESSAGES_BEFORE_LOGIN, ENABLE_ASSISTANTS_RAG } from "$env/static/private";
import { authCondition, requiresUser } from "$lib/server/auth";
import { collections } from "$lib/server/database";
import { models } from "$lib/server/models";
import { ERROR_MESSAGES } from "$lib/stores/errors";
import type { Message } from "$lib/types/Message";
import { error } from "@sveltejs/kit";
import { ObjectId } from "mongodb";
import { z } from "zod";
import type { MessageUpdate } from "$lib/types/MessageUpdate";
import { runWebSearch } from "$lib/server/websearch/runWebSearch";
import { abortedGenerations } from "$lib/server/abortedGenerations";
import { summarize } from "$lib/server/summarize";
import { uploadFile } from "$lib/server/files/uploadFile";
import sizeof from "image-size";
import type { Assistant } from "$lib/types/Assistant";
import { convertLegacyConversation } from "$lib/utils/tree/convertLegacyConversation";
import { isMessageId } from "$lib/utils/tree/isMessageId";
import { buildSubtree } from "$lib/utils/tree/buildSubtree.js";
import { addChildren } from "$lib/utils/tree/addChildren.js";
import { addSibling } from "$lib/utils/tree/addSibling.js";
import { preprocessMessages } from "$lib/server/preprocessMessages.js";
import { usageLimits } from "$lib/server/usageLimits";
export async function POST({ request, locals, params, getClientAddress }) {
const id = z.string().parse(params.id);
const convId = new ObjectId(id);
const promptedAt = new Date();
const userId = locals.user?._id ?? locals.sessionId;
// check user
if (!userId) {
throw error(401, "Unauthorized");
}
// check if the user has access to the conversation
const convBeforeCheck = await collections.conversations.findOne({
_id: convId,
...authCondition(locals),
});
if (convBeforeCheck && !convBeforeCheck.rootMessageId) {
const res = await collections.conversations.updateOne(
{
_id: convId,
},
{
$set: {
...convBeforeCheck,
...convertLegacyConversation(convBeforeCheck),
},
}
);
if (!res.acknowledged) {
throw error(500, "Failed to convert conversation");
}
}
const conv = await collections.conversations.findOne({
_id: convId,
...authCondition(locals),
});
if (!conv) {
throw error(404, "Conversation not found");
}
// register the event for ratelimiting
await collections.messageEvents.insertOne({
userId,
createdAt: new Date(),
ip: getClientAddress(),
});
const messagesBeforeLogin = MESSAGES_BEFORE_LOGIN ? parseInt(MESSAGES_BEFORE_LOGIN) : 0;
// guest mode check
if (!locals.user?._id && requiresUser && messagesBeforeLogin) {
const totalMessages =
(
await collections.conversations
.aggregate([
{ $match: { ...authCondition(locals), "messages.from": "assistant" } },
{ $project: { messages: 1 } },
{ $limit: messagesBeforeLogin + 1 },
{ $unwind: "$messages" },
{ $match: { "messages.from": "assistant" } },
{ $count: "messages" },
])
.toArray()
)[0]?.messages ?? 0;
if (totalMessages > messagesBeforeLogin) {
throw error(429, "Exceeded number of messages before login");
}
}
if (usageLimits?.messagesPerMinute) {
// check if the user is rate limited
const nEvents = Math.max(
await collections.messageEvents.countDocuments({ userId }),
await collections.messageEvents.countDocuments({ ip: getClientAddress() })
);
if (nEvents > usageLimits.messagesPerMinute) {
throw error(429, ERROR_MESSAGES.rateLimited);
}
}
if (usageLimits?.messages && conv.messages.length > usageLimits.messages) {
throw error(
429,
`This conversation has more than ${usageLimits.messages} messages. Start a new one to continue`
);
}
// fetch the model
const model = models.find((m) => m.id === conv.model);
if (!model) {
throw error(410, "Model not available anymore");
}
// finally parse the content of the request
const json = await request.json();
const {
inputs: newPrompt,
id: messageId,
is_retry: isRetry,
is_continue: isContinue,
web_search: webSearch,
files: b64files,
} = z
.object({
id: z.string().uuid().refine(isMessageId).optional(), // parent message id to append to for a normal message, or the message id for a retry/continue
inputs: z.optional(
z
.string()
.trim()
.min(1)
.transform((s) => s.replace(/\r\n/g, "\n"))
),
is_retry: z.optional(z.boolean()),
is_continue: z.optional(z.boolean()),
web_search: z.optional(z.boolean()),
files: z.optional(z.array(z.string())),
})
.parse(json);
if (usageLimits?.messageLength && (newPrompt?.length ?? 0) > usageLimits.messageLength) {
throw error(400, "Message too long.");
}
// files is an array of base64 strings encoding Blob objects
// we need to convert this array to an array of File objects
const files = b64files?.map((file) => {
const blob = Buffer.from(file, "base64");
return new File([blob], "image.png");
});
// check sizes
if (files) {
const filechecks = await Promise.all(
files.map(async (file) => {
const dimensions = sizeof(Buffer.from(await file.arrayBuffer()));
return (
file.size > 2 * 1024 * 1024 ||
(dimensions.width ?? 0) > 224 ||
(dimensions.height ?? 0) > 224
);
})
);
if (filechecks.some((check) => check)) {
throw error(413, "File too large, should be <2MB and 224x224 max.");
}
}
let hashes: undefined | string[];
if (files) {
hashes = await Promise.all(files.map(async (file) => await uploadFile(file, conv)));
}
// we will append tokens to the content of this message
let messageToWriteToId: Message["id"] | undefined = undefined;
// used for building the prompt, subtree of the conversation that goes from the latest message to the root
let messagesForPrompt: Message[] = [];
if (isContinue && messageId) {
// if it's the last message and we continue then we build the prompt up to the last message
// we will strip the end tokens afterwards when the prompt is built
if ((conv.messages.find((msg) => msg.id === messageId)?.children?.length ?? 0) > 0) {
throw error(400, "Can only continue the last message");
}
messageToWriteToId = messageId;
messagesForPrompt = buildSubtree(conv, messageId);
} else if (isRetry && messageId) {
// two cases, if we're retrying a user message with a newPrompt set,
// it means we're editing a user message
// if we're retrying on an assistant message, newPrompt cannot be set
// it means we're retrying the last assistant message for a new answer
const messageToRetry = conv.messages.find((message) => message.id === messageId);
if (!messageToRetry) {
throw error(404, "Message not found");
}
if (messageToRetry.from === "user" && newPrompt) {
// add a sibling to this message from the user, with the alternative prompt
// add a children to that sibling, where we can write to
const newUserMessageId = addSibling(conv, { from: "user", content: newPrompt }, messageId);
messageToWriteToId = addChildren(
conv,
{ from: "assistant", content: "", files: hashes },
newUserMessageId
);
messagesForPrompt = buildSubtree(conv, newUserMessageId);
} else if (messageToRetry.from === "assistant") {
// we're retrying an assistant message, to generate a new answer
// just add a sibling to the assistant answer where we can write to
messageToWriteToId = addSibling(conv, { from: "assistant", content: "" }, messageId);
messagesForPrompt = buildSubtree(conv, messageId);
messagesForPrompt.pop(); // don't need the latest assistant message in the prompt since we're retrying it
}
} else {
// just a normal linear conversation, so we add the user message
// and the blank assistant message back to back
const newUserMessageId = addChildren(
conv,
{
from: "user",
content: newPrompt ?? "",
files: hashes,
createdAt: new Date(),
updatedAt: new Date(),
},
messageId
);
messageToWriteToId = addChildren(
conv,
{
from: "assistant",
content: "",
createdAt: new Date(),
updatedAt: new Date(),
},
newUserMessageId
);
// build the prompt from the user message
messagesForPrompt = buildSubtree(conv, newUserMessageId);
}
const messageToWriteTo = conv.messages.find((message) => message.id === messageToWriteToId);
if (!messageToWriteTo) {
throw error(500, "Failed to create message");
}
if (messagesForPrompt.length === 0) {
throw error(500, "Failed to create prompt");
}
// update the conversation with the new messages
await collections.conversations.updateOne(
{
_id: convId,
},
{
$set: {
messages: conv.messages,
title: conv.title,
updatedAt: new Date(),
},
}
);
let doneStreaming = false;
// we now build the stream
const stream = new ReadableStream({
async start(controller) {
messageToWriteTo.updates ??= [];
function update(newUpdate: MessageUpdate) {
if (newUpdate.type !== "stream") {
messageToWriteTo?.updates?.push(newUpdate);
}
if (newUpdate.type === "stream" && newUpdate.token === "") {
return;
}
controller.enqueue(JSON.stringify(newUpdate) + "\n");
if (newUpdate.type === "finalAnswer") {
					// pad with 4096 spaces so the browser doesn't hold the response back in its buffer
controller.enqueue(" ".repeat(4096));
}
}
update({ type: "status", status: "started" });
const summarizeIfNeeded = (async () => {
if (conv.title === "New Chat" && conv.messages.length === 3) {
try {
conv.title = (await summarize(conv.messages[1].content)) ?? conv.title;
update({ type: "status", status: "title", message: conv.title });
await collections.conversations.updateOne(
{
_id: convId,
},
{
$set: {
title: conv?.title,
updatedAt: new Date(),
},
}
);
} catch (e) {
console.error(e);
}
}
})();
await collections.conversations.updateOne(
{
_id: convId,
},
{
$set: {
title: conv.title,
updatedAt: new Date(),
},
}
);
// check if assistant has a rag
const rag = (
await collections.assistants.findOne<Pick<Assistant, "rag">>(
{ _id: conv.assistantId },
{ projection: { rag: 1 } }
)
)?.rag;
const assistantHasRAG =
ENABLE_ASSISTANTS_RAG === "true" &&
rag &&
(rag.allowedLinks.length > 0 || rag.allowedDomains.length > 0 || rag.allowAllDomains);
// perform websearch if needed
if (!isContinue && ((webSearch && !conv.assistantId) || assistantHasRAG)) {
messageToWriteTo.webSearch = await runWebSearch(conv, messagesForPrompt, update, rag);
}
// inject websearch result & optionally images into the messages
const processedMessages = await preprocessMessages(
messagesForPrompt,
messageToWriteTo.webSearch,
model.multimodal,
convId
);
const previousText = messageToWriteTo.content;
try {
const endpoint = await model.getEndpoint();
for await (const output of await endpoint({
messages: processedMessages,
preprompt: conv.preprompt,
continueMessage: isContinue,
})) {
					// if generated_text is not set, the generation is not finished yet
					if (!output.generated_text) {
						// so we stream the next token
if (!output.token.special) {
update({
type: "stream",
token: output.token.text,
});
// abort check
const date = abortedGenerations.get(convId.toString());
if (date && date > promptedAt) {
break;
}
// no output check
if (!output) {
break;
}
// otherwise we just concatenate tokens
messageToWriteTo.content += output.token.text;
}
} else {
messageToWriteTo.interrupted = !output.token.special;
						// add output.generated_text to the last message
// strip end tokens from the output.generated_text
const text = (model.parameters.stop ?? []).reduce((acc: string, curr: string) => {
if (acc.endsWith(curr)) {
messageToWriteTo.interrupted = false;
return acc.slice(0, acc.length - curr.length);
}
return acc;
}, output.generated_text.trimEnd());
messageToWriteTo.content = previousText + text;
messageToWriteTo.updatedAt = new Date();
}
}
} catch (e) {
update({ type: "status", status: "error", message: (e as Error).message });
} finally {
// check if no output was generated
if (messageToWriteTo.content === previousText) {
update({
type: "status",
status: "error",
message: "No output was generated. Something went wrong.",
});
}
}
await collections.conversations.updateOne(
{
_id: convId,
},
{
$set: {
messages: conv.messages,
title: conv?.title,
updatedAt: new Date(),
},
}
);
// used to detect if cancel() is called bc of interrupt or just because the connection closes
doneStreaming = true;
update({
type: "finalAnswer",
text: messageToWriteTo.content,
});
await summarizeIfNeeded;
controller.close();
return;
},
async cancel() {
if (!doneStreaming) {
await collections.conversations.updateOne(
{
_id: convId,
},
{
$set: {
messages: conv.messages,
title: conv.title,
updatedAt: new Date(),
},
}
);
}
},
});
// Todo: maybe we should wait for the message to be saved before ending the response - in case of errors
return new Response(stream, {
headers: {
"Content-Type": "text/event-stream",
},
});
}
export async function DELETE({ locals, params }) {
const convId = new ObjectId(params.id);
const conv = await collections.conversations.findOne({
_id: convId,
...authCondition(locals),
});
if (!conv) {
throw error(404, "Conversation not found");
}
await collections.conversations.deleteOne({ _id: conv._id });
return new Response();
}
export async function PATCH({ request, locals, params }) {
const { title } = z
.object({ title: z.string().trim().min(1).max(100) })
.parse(await request.json());
const convId = new ObjectId(params.id);
const conv = await collections.conversations.findOne({
_id: convId,
...authCondition(locals),
});
if (!conv) {
throw error(404, "Conversation not found");
}
await collections.conversations.updateOne(
{
_id: convId,
},
{
$set: {
title,
},
}
);
return new Response();
}
| chat-ui/src/routes/conversation/[id]/+server.ts/0 | {
"file_path": "chat-ui/src/routes/conversation/[id]/+server.ts",
"repo_id": "chat-ui",
"token_count": 5547
} | 60 |
<script lang="ts">
import { PUBLIC_APP_COLOR } from "$env/static/public";
import { isHuggingChat } from "$lib/utils/isHuggingChat";
export let name: string;
export let logoUrl: string | undefined;
import logo from "../../../../../static/huggingchat/logo.svg?raw";
</script>
<div class=" flex h-[648px] w-full flex-col items-center bg-white">
<div class="flex flex-1 flex-col items-center justify-center">
{#if logoUrl}
<img class="h-48 w-48" src={logoUrl} alt="avatar" />
{/if}
<h1 class="m-0 text-5xl font-bold text-black">
{name}
</h1>
</div>
<div
class="flex h-[200px] w-full flex-col items-center justify-center rounded-b-none bg-{PUBLIC_APP_COLOR}-500/10 pb-10 pt-10 text-4xl text-gray-500"
style="border-radius: 100% 100% 0 0;"
>
Try it now
{#if isHuggingChat}
on
{/if}
{#if isHuggingChat}
<div class="flex flex-row pt-3 text-5xl font-bold text-black">
<div class="mr-5 flex items-center justify-center" id="logo">
<!-- eslint-disable-next-line -->
{@html logo}
</div>
<span>HuggingChat</span>
</div>
{/if}
</div>
</div>
| chat-ui/src/routes/models/[...model]/thumbnail.png/ModelThumbnail.svelte/0 | {
"file_path": "chat-ui/src/routes/models/[...model]/thumbnail.png/ModelThumbnail.svelte",
"repo_id": "chat-ui",
"token_count": 475
} | 61 |
<script lang="ts">
import type { ActionData, PageData } from "./$types";
import AssistantSettings from "$lib/components/AssistantSettings.svelte";
export let data: PageData;
export let form: ActionData;
</script>
<AssistantSettings bind:form models={data.models} />
| chat-ui/src/routes/settings/(nav)/assistants/new/[email protected]/0 | {
"file_path": "chat-ui/src/routes/settings/(nav)/assistants/new/[email protected]",
"repo_id": "chat-ui",
"token_count": 80
} | 62 |
{
"background_color": "#ffffff",
"name": "HuggingChat",
"short_name": "HuggingChat",
"display": "standalone",
"start_url": "/chat",
"icons": [
{
"src": "/chat/huggingchat/icon-128x128.png",
"sizes": "128x128",
"type": "image/png"
},
{
"src": "/chat/huggingchat/icon-256x256.png",
"sizes": "256x256",
"type": "image/png"
},
{
"src": "/chat/huggingchat/icon-512x512.png",
"sizes": "512x512",
"type": "image/png"
}
]
}
| chat-ui/static/huggingchat/manifest.json/0 | {
"file_path": "chat-ui/static/huggingchat/manifest.json",
"repo_id": "chat-ui",
"token_count": 233
} | 63 |
import json
import os
import tempfile
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features import Array2D
from utils import generate_examples, get_duration
SHAPE_TEST_1 = (30, 487)
SHAPE_TEST_2 = (36, 1024)
SPEED_TEST_SHAPE = (100, 100)
SPEED_TEST_N_EXAMPLES = 100
DEFAULT_FEATURES = datasets.Features(
{"text": Array2D(SHAPE_TEST_1, dtype="float32"), "image": Array2D(SHAPE_TEST_2, dtype="float32")}
)
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def write(my_features, dummy_data, tmp_dir):
with ArrowWriter(features=my_features, path=os.path.join(tmp_dir, "beta.arrow")) as writer:
for key, record in dummy_data:
example = my_features.encode_example(record)
writer.write(example)
num_examples, num_bytes = writer.finalize()
@get_duration
def read_unformated(feats, tmp_dir):
dataset = datasets.Dataset.from_file(
filename=os.path.join(tmp_dir, "beta.arrow"), info=datasets.DatasetInfo(features=feats)
)
for _ in dataset:
pass
@get_duration
def read_formatted_as_numpy(feats, tmp_dir):
dataset = datasets.Dataset.from_file(
filename=os.path.join(tmp_dir, "beta.arrow"), info=datasets.DatasetInfo(features=feats)
)
dataset.set_format("numpy")
for _ in dataset:
pass
@get_duration
def read_batch_unformated(feats, tmp_dir):
batch_size = 10
dataset = datasets.Dataset.from_file(
filename=os.path.join(tmp_dir, "beta.arrow"), info=datasets.DatasetInfo(features=feats)
)
for i in range(0, len(dataset), batch_size):
_ = dataset[i : i + batch_size]
@get_duration
def read_batch_formatted_as_numpy(feats, tmp_dir):
batch_size = 10
dataset = datasets.Dataset.from_file(
filename=os.path.join(tmp_dir, "beta.arrow"), info=datasets.DatasetInfo(features=feats)
)
dataset.set_format("numpy")
for i in range(0, len(dataset), batch_size):
_ = dataset[i : i + batch_size]
@get_duration
def read_col_unformated(feats, tmp_dir):
dataset = datasets.Dataset.from_file(
filename=os.path.join(tmp_dir, "beta.arrow"), info=datasets.DatasetInfo(features=feats)
)
for col in feats:
_ = dataset[col]
@get_duration
def read_col_formatted_as_numpy(feats, tmp_dir):
dataset = datasets.Dataset.from_file(
filename=os.path.join(tmp_dir, "beta.arrow"), info=datasets.DatasetInfo(features=feats)
)
dataset.set_format("numpy")
for col in feats:
_ = dataset[col]
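# Benchmark writing 100x100 float32 arrays and reading them back, comparing the fixed-shape
# Array2D feature against nested and flattened Sequence features.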
def benchmark_array_xd():
times = {}
read_functions = (
read_unformated,
read_formatted_as_numpy,
read_batch_unformated,
read_batch_formatted_as_numpy,
read_col_unformated,
read_col_formatted_as_numpy,
)
with tempfile.TemporaryDirectory() as tmp_dir:
feats = datasets.Features({"image": Array2D(SPEED_TEST_SHAPE, dtype="float32")})
data = generate_examples(features=feats, num_examples=SPEED_TEST_N_EXAMPLES)
times["write_array2d"] = write(feats, data, tmp_dir)
for read_func in read_functions:
times[read_func.__name__ + " after write_array2d"] = read_func(feats, tmp_dir)
with tempfile.TemporaryDirectory() as tmp_dir:
# don't use fixed length for fair comparison
# feats = datasets.Features(
# {"image": datasets.Sequence(datasets.Sequence(datasets.Value("float32"), SPEED_TEST_SHAPE[1]), SPEED_TEST_SHAPE[0])}
# )
feats = datasets.Features({"image": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))})
data = generate_examples(
features=feats, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={"image": SPEED_TEST_SHAPE}
)
times["write_nested_sequence"] = write(feats, data, tmp_dir)
for read_func in read_functions:
times[read_func.__name__ + " after write_nested_sequence"] = read_func(feats, tmp_dir)
with tempfile.TemporaryDirectory() as tmp_dir:
# don't use fixed length for fair comparison
# feats = datasets.Features(
# {"image": datasets.Sequence(datasets.Value("float32"), SPEED_TEST_SHAPE[0] * SPEED_TEST_SHAPE[1])}
# )
feats = datasets.Features({"image": datasets.Sequence(datasets.Value("float32"))})
data = generate_examples(
features=feats,
num_examples=SPEED_TEST_N_EXAMPLES,
seq_shapes={"image": [SPEED_TEST_SHAPE[0] * SPEED_TEST_SHAPE[1]]},
)
times["write_flattened_sequence"] = write(feats, data, tmp_dir)
for read_func in read_functions:
times[read_func.__name__ + " after write_flattened_sequence"] = read_func(feats, tmp_dir)
with open(RESULTS_FILE_PATH, "wb") as f:
f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_array_xd()
| datasets/benchmarks/benchmark_array_xd.py/0 | {
"file_path": "datasets/benchmarks/benchmark_array_xd.py",
"repo_id": "datasets",
"token_count": 2176
} | 64 |
- sections:
- local: index
title: 🤗 Datasets
- local: quickstart
title: Quickstart
- local: installation
title: Installation
title: Get started
- sections:
- local: tutorial
title: Overview
- local: load_hub
title: Load a dataset from the Hub
- local: access
title: Know your dataset
- local: use_dataset
title: Preprocess
- local: metrics
title: Evaluate predictions
- local: create_dataset
title: Create a dataset
- local: upload_dataset
title: Share a dataset to the Hub
title: "Tutorials"
- sections:
- local: how_to
title: Overview
- sections:
- local: loading
title: Load
- local: process
title: Process
- local: stream
title: Stream
- local: use_with_tensorflow
title: Use with TensorFlow
- local: use_with_pytorch
title: Use with PyTorch
- local: use_with_jax
title: Use with JAX
- local: use_with_spark
title: Use with Spark
- local: cache
title: Cache management
- local: filesystems
title: Cloud storage
- local: faiss_es
title: Search index
- local: how_to_metrics
title: Metrics
- local: beam
title: Beam Datasets
- local: troubleshoot
title: Troubleshooting
title: "General usage"
- sections:
- local: audio_load
title: Load audio data
- local: audio_process
title: Process audio data
- local: audio_dataset
title: Create an audio dataset
title: "Audio"
- sections:
- local: image_load
title: Load image data
- local: image_process
title: Process image data
- local: image_dataset
title: Create an image dataset
- local: depth_estimation
title: Depth estimation
- local: image_classification
title: Image classification
- local: semantic_segmentation
title: Semantic segmentation
- local: object_detection
title: Object detection
title: "Vision"
- sections:
- local: nlp_load
title: Load text data
- local: nlp_process
title: Process text data
title: "Text"
- sections:
- local: tabular_load
title: Load tabular data
title: "Tabular"
- sections:
- local: share
title: Share
- local: dataset_card
title: Create a dataset card
- local: repository_structure
title: Structure your repository
- local: dataset_script
title: Create a dataset loading script
title: "Dataset repository"
title: "How-to guides"
- sections:
- local: about_arrow
title: Datasets 🤝 Arrow
- local: about_cache
title: The cache
- local: about_mapstyle_vs_iterable
title: Dataset or IterableDataset
- local: about_dataset_features
title: Dataset features
- local: about_dataset_load
title: Build and load
- local: about_map_batch
title: Batch mapping
- local: about_metrics
title: All about metrics
title: "Conceptual guides"
- sections:
- local: package_reference/main_classes
title: Main classes
- local: package_reference/builder_classes
title: Builder classes
- local: package_reference/loading_methods
title: Loading methods
- local: package_reference/table_classes
title: Table Classes
- local: package_reference/utilities
title: Utilities
- local: package_reference/task_templates
title: Task templates
title: "Reference"
| datasets/docs/source/_toctree.yml/0 | {
"file_path": "datasets/docs/source/_toctree.yml",
"repo_id": "datasets",
"token_count": 1247
} | 65 |
# Create a dataset loading script
<Tip>
The dataset loading script is likely not needed if your dataset is in one of the following formats: CSV, JSON, JSON lines, text, images, audio or Parquet.
With those formats, you should be able to load your dataset automatically with [`~datasets.load_dataset`],
as long as your dataset repository has a [required structure](./repository_structure).
</Tip>
<Tip warning={true}>
In the next major release, the new safety features of 🤗 Datasets will disable running dataset loading scripts by default, and you will have to pass `trust_remote_code=True` to load datasets that require running a dataset script.
</Tip>
Write a dataset script to load and share datasets that consist of data files in unsupported formats or require more complex data preparation.
This is a more advanced way to define a dataset than using [YAML metadata in the dataset card](./repository_structure#define-your-splits-in-yaml).
A dataset script is a Python file that defines the different configurations and splits of your dataset, as well as how to download and process the data.
The script can download data files from any website, or from the same dataset repository.
A dataset loading script should have the same name as a dataset repository or directory. For example, a repository named `my_dataset` should contain the `my_dataset.py` script. This way it can be loaded with:
```
my_dataset/
├── README.md
└── my_dataset.py
```
```py
>>> from datasets import load_dataset
>>> load_dataset("path/to/my_dataset")
```
The following guide includes instructions for writing a dataset script to:
- Add dataset metadata.
- Download data files.
- Generate samples.
- Generate dataset metadata.
- Upload a dataset to the Hub.
Open the [SQuAD dataset loading script](https://huggingface.co/datasets/squad/blob/main/squad.py) template to follow along on how to share a dataset.
<Tip>
To help you get started, try beginning with the dataset loading script [template](https://github.com/huggingface/datasets/blob/main/templates/new_dataset_script.py)!
</Tip>
## Add dataset attributes
The first step is to add some information, or attributes, about your dataset in [`DatasetBuilder._info`]. The most important attributes you should specify are:
1. `DatasetInfo.description` provides a concise description of your dataset. The description informs the user what's in the dataset, how it was collected, and how it can be used for an NLP task.
2. `DatasetInfo.features` defines the name and type of each column in your dataset. This will also provide the structure for each example, so it is possible to create nested subfields in a column if you want. Take a look at [`Features`] for a full list of feature types you can use.
```py
datasets.Features(
{
"id": datasets.Value("string"),
"title": datasets.Value("string"),
"context": datasets.Value("string"),
"question": datasets.Value("string"),
"answers": datasets.Sequence(
{
"text": datasets.Value("string"),
"answer_start": datasets.Value("int32"),
}
),
}
)
```
3. `DatasetInfo.homepage` contains the URL to the dataset homepage so users can find more details about the dataset.
4. `DatasetInfo.citation` contains a BibTeX citation for the dataset.
After you've filled out all these fields in the template, it should look like the following example from the SQuAD loading script:
```py
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("string"),
"title": datasets.Value("string"),
"context": datasets.Value("string"),
"question": datasets.Value("string"),
"answers": datasets.features.Sequence(
{"text": datasets.Value("string"), "answer_start": datasets.Value("int32"),}
),
}
),
# No default supervised_keys (as we have to pass both question
# and context as input).
supervised_keys=None,
homepage="https://rajpurkar.github.io/SQuAD-explorer/",
citation=_CITATION,
)
```
### Multiple configurations
In some cases, your dataset may have multiple configurations. For example, the [SuperGLUE](https://huggingface.co/datasets/super_glue) dataset is a collection of 5 datasets designed to evaluate language understanding tasks. 🤗 Datasets provides [`BuilderConfig`] which allows you to create different configurations for the user to select from.
Let's study the [SuperGLUE loading script](https://huggingface.co/datasets/super_glue/blob/main/super_glue.py) to see how you can define several configurations.
1. Create a [`BuilderConfig`] subclass with attributes about your dataset. These attributes can be the features of your dataset, label classes, and a URL to the data files.
```py
class SuperGlueConfig(datasets.BuilderConfig):
"""BuilderConfig for SuperGLUE."""
def __init__(self, features, data_url, citation, url, label_classes=("False", "True"), **kwargs):
"""BuilderConfig for SuperGLUE.
Args:
features: *list[string]*, list of the features that will appear in the
feature dict. Should not include "label".
data_url: *string*, url to download the zip file from.
citation: *string*, citation for the data set.
url: *string*, url for information about the data set.
label_classes: *list[string]*, the list of classes for the label if the
label is present as a string. Non-string labels will be cast to either
'False' or 'True'.
**kwargs: keyword arguments forwarded to super.
"""
# Version history:
        # 1.0.2: Fixed non-determinism in ReCoRD.
# 1.0.1: Change from the pre-release trial version of SuperGLUE (v1.9) to
# the full release (v2.0).
# 1.0.0: S3 (new shuffling, sharding and slicing mechanism).
# 0.0.2: Initial version.
super().__init__(version=datasets.Version("1.0.2"), **kwargs)
self.features = features
self.label_classes = label_classes
self.data_url = data_url
self.citation = citation
self.url = url
```
2. Create instances of your config to specify the values of the attributes of each configuration. This gives you the flexibility to specify the name and description of each configuration. These sub-class instances should be listed under `DatasetBuilder.BUILDER_CONFIGS`:
```py
class SuperGlue(datasets.GeneratorBasedBuilder):
"""The SuperGLUE benchmark."""
BUILDER_CONFIG_CLASS = SuperGlueConfig
BUILDER_CONFIGS = [
SuperGlueConfig(
name="boolq",
description=_BOOLQ_DESCRIPTION,
features=["question", "passage"],
data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/BoolQ.zip",
citation=_BOOLQ_CITATION,
url="https://github.com/google-research-datasets/boolean-questions",
),
...
...
SuperGlueConfig(
name="axg",
description=_AXG_DESCRIPTION,
features=["premise", "hypothesis"],
label_classes=["entailment", "not_entailment"],
data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/AX-g.zip",
citation=_AXG_CITATION,
url="https://github.com/rudinger/winogender-schemas",
),
```
3. Now, users can load a specific configuration of the dataset with the configuration `name`:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset('super_glue', 'boolq')
```
Additionally, users can instantiate a custom builder configuration by passing the builder configuration arguments to [`load_dataset`]:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset('super_glue', data_url="https://custom_url")
```
### Default configurations
Users must specify a configuration name when they load a dataset with multiple configurations. Otherwise, 🤗 Datasets will raise a `ValueError`, and prompt the user to select a configuration name. You can avoid this by setting a default dataset configuration with the `DEFAULT_CONFIG_NAME` attribute:
```py
class NewDataset(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("1.1.0")
BUILDER_CONFIGS = [
datasets.BuilderConfig(name="first_domain", version=VERSION, description="This part of my dataset covers a first domain"),
datasets.BuilderConfig(name="second_domain", version=VERSION, description="This part of my dataset covers a second domain"),
]
DEFAULT_CONFIG_NAME = "first_domain"
```
<Tip warning={true}>
Only use a default configuration when it makes sense. Don't set one just because it may be more convenient for the user not to specify a configuration when they load your dataset. For example, multi-lingual datasets often have a separate configuration for each language. An appropriate default may be an aggregated configuration that loads all the languages of the dataset if the user doesn't request a particular one.
</Tip>
## Download data files and organize splits
After you've defined the attributes of your dataset, the next step is to download the data files and organize them according to their splits.
1. Create a dictionary of URLs in the loading script that point to the original SQuAD data files:
```py
_URL = "https://rajpurkar.github.io/SQuAD-explorer/dataset/"
_URLS = {
"train": _URL + "train-v1.1.json",
"dev": _URL + "dev-v1.1.json",
}
```
<Tip>
If the data files live in the same folder or repository as the dataset script, you can just pass the relative paths to the files instead of URLs.
</Tip>
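For instance, a hypothetical script stored next to its data files could reference them with relative paths (the file names below are placeholders, not part of any real dataset):
```py
_URLS = {
    "train": "data/train.json",
    "dev": "data/dev.json",
}
```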
2. [`DownloadManager.download_and_extract`] takes this dictionary and downloads the data files. Once the files are downloaded, use [`SplitGenerator`] to organize each split in the dataset. This is a simple class that contains:
- The `name` of each split. You should use the standard split names: `Split.TRAIN`, `Split.TEST`, and `Split.VALIDATION`.
- `gen_kwargs` provides the file paths to the data files to load for each split.
Your `DatasetBuilder._split_generators()` method should look like this now:
```py
def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
urls_to_download = self._URLS
downloaded_files = dl_manager.download_and_extract(urls_to_download)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
]
```
## Generate samples
At this point, you have:
- Added the dataset attributes.
- Provided instructions for how to download the data files.
- Organized the splits.
The next step is to actually generate the samples in each split.
1. `DatasetBuilder._generate_examples` takes the file path provided by `gen_kwargs` to read and parse the data files. You need to write a function that loads the data files and extracts the columns.
2. Your function should yield a tuple of an `id_`, and an example from the dataset.
```py
def _generate_examples(self, filepath):
"""This function returns the examples in the raw (text) form."""
logger.info("generating examples from = %s", filepath)
with open(filepath) as f:
squad = json.load(f)
for article in squad["data"]:
title = article.get("title", "").strip()
for paragraph in article["paragraphs"]:
context = paragraph["context"].strip()
for qa in paragraph["qas"]:
question = qa["question"].strip()
id_ = qa["id"]
answer_starts = [answer["answer_start"] for answer in qa["answers"]]
answers = [answer["text"].strip() for answer in qa["answers"]]
# Features currently used are "context", "question", and "answers".
# Others are extracted here for the ease of future expansions.
yield id_, {
"title": title,
"context": context,
"question": question,
"id": id_,
"answers": {"answer_start": answer_starts, "text": answers,},
}
```
## (Optional) Generate dataset metadata
Adding dataset metadata is a great way to include information about your dataset. The metadata is stored in the dataset card `README.md` in YAML. It includes information like the number of examples required to confirm the dataset was correctly generated, and information about the dataset like its `features`.
Run the following command to generate your dataset metadata in `README.md` and make sure your new dataset loading script works correctly:
```
datasets-cli test path/to/<your-dataset-loading-script> --save_info --all_configs
```
If your dataset loading script passed the test, you should now have a `README.md` file in your dataset folder containing a `dataset_info` field with some metadata.
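The exact content depends on your dataset, but the generated `dataset_info` block looks roughly like the sketch below (the feature names and sizes are placeholders, not real values):
```yaml
dataset_info:
  features:
  - name: id
    dtype: string
  - name: text
    dtype: string
  splits:
  - name: train
    num_bytes: 1048576
    num_examples: 10000
  download_size: 2097152
  dataset_size: 1048576
```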
## Upload to the Hub
Once your script is ready, [create a dataset card](dataset_card) and [upload it to the Hub](share).
Congratulations, you can now load your dataset from the Hub! 🥳
```py
>>> from datasets import load_dataset
>>> load_dataset("<username>/my_dataset")
```
## Advanced features
### Sharding
If your dataset is made of many big files, 🤗 Datasets automatically runs your script in parallel to make it super fast!
It can help if you have hundreds or thousands of TAR archives, or JSONL files like [oscar](https://huggingface.co/datasets/oscar/blob/main/oscar.py) for example.
To make it work, we consider lists of files in `gen_kwargs` to be shards.
Therefore 🤗 Datasets can automatically spawn several workers to run `_generate_examples` in parallel, and each worker is given a subset of shards to process.
```python
class MyShardedDataset(datasets.GeneratorBasedBuilder):
def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
downloaded_files = dl_manager.download([f"data/shard_{i}.jsonl" for i in range(1024)])
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": downloaded_files}),
]
def _generate_examples(self, filepaths):
# Each worker can be given a slice of the original `filepaths` list defined in the `gen_kwargs`
# so that this code can run in parallel on several shards at the same time
for filepath in filepaths:
...
```
Users can also specify `num_proc=` in `load_dataset()` to specify the number of processes to use as workers.
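For example, assuming your script defines its shards as above, the following call would split the generation across 8 worker processes:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("path/to/my_dataset", num_proc=8)
```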
### ArrowBasedBuilder
For some datasets it can be much faster to yield batches of data rather than examples one by one.
You can speed up the dataset generation by yielding Arrow tables directly, instead of examples.
This is especially useful if your data comes from Pandas DataFrames for example, since the conversion from Pandas to Arrow is as simple as:
```python
import pyarrow as pa
pa_table = pa.Table.from_pandas(df)
```
To yield Arrow tables instead of single examples, make your dataset builder inherit from [`ArrowBasedBuilder`] instead of [`GeneratorBasedBuilder`], and use `_generate_tables` instead of `_generate_examples`:
```python
class MySuperFastDataset(datasets.ArrowBasedBuilder):
def _generate_tables(self, filepaths):
idx = 0
for filepath in filepaths:
...
yield idx, pa_table
idx += 1
```
Don't forget to keep your script memory efficient, in case users run it on machines with a low amount of RAM.
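As a minimal sketch (not an official example), here is one way an [`ArrowBasedBuilder`] could stay memory efficient by streaming CSV shards batch by batch with `pyarrow.csv.open_csv`; the class name and the CSV file format are assumptions made for illustration:
```python
import datasets
import pyarrow as pa
import pyarrow.csv as pac


class MyStreamingCsvDataset(datasets.ArrowBasedBuilder):
    def _generate_tables(self, filepaths):
        idx = 0
        for filepath in filepaths:
            # Read the file incrementally so only one record batch is held in memory at a time
            reader = pac.open_csv(filepath)
            for batch in reader:
                # Wrap each record batch in a table and yield it with a unique key
                yield idx, pa.Table.from_batches([batch])
                idx += 1
```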
| datasets/docs/source/dataset_script.mdx/0 | {
"file_path": "datasets/docs/source/dataset_script.mdx",
"repo_id": "datasets",
"token_count": 5380
} | 66 |
# Evaluate predictions
<Tip warning={true}>
Metrics is deprecated in 🤗 Datasets. To learn more about how to use metrics, take a look at the library 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index)! In addition to metrics, you can find more tools for evaluating models and datasets.
</Tip>
🤗 Datasets provides various common and NLP-specific [metrics](https://huggingface.co/metrics) for you to measure your model's performance. In this section of the tutorials, you will load a metric and use it to evaluate your model's predictions.
You can see what metrics are available with [`list_metrics`]:
```py
>>> from datasets import list_metrics
>>> metrics_list = list_metrics()
>>> len(metrics_list)
28
>>> print(metrics_list)
['accuracy', 'bertscore', 'bleu', 'bleurt', 'cer', 'comet', 'coval', 'cuad', 'f1', 'gleu', 'glue', 'indic_glue', 'matthews_correlation', 'meteor', 'pearsonr', 'precision', 'recall', 'rouge', 'sacrebleu', 'sari', 'seqeval', 'spearmanr', 'squad', 'squad_v2', 'super_glue', 'wer', 'wiki_split', 'xnli']
```
## Load metric
It is very easy to load a metric with 🤗 Datasets. In fact, you will notice that it is very similar to loading a dataset! Load a metric from the Hub with [`load_metric`]:
```py
>>> from datasets import load_metric
>>> metric = load_metric('glue', 'mrpc')
```
This will load the metric associated with the MRPC dataset from the GLUE benchmark.
## Select a configuration
If you are using a benchmark dataset, you need to select a metric that is associated with the configuration you are using. Select a metric configuration by providing the configuration name:
```py
>>> metric = load_metric('glue', 'mrpc')
```
## Metrics object
Before you begin using a [`Metric`] object, you should get to know it a little better. As with a dataset, you can return some basic information about a metric. For example, access the `inputs_description` parameter in [`datasets.MetricInfo`] to get more information about a metric's expected input format and some usage examples:
```py
>>> print(metric.inputs_description)
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
...
>>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
...
```
Notice for the MRPC configuration, the metric expects the input format to be zero or one. For a complete list of attributes you can return with your metric, take a look at [`MetricInfo`].
## Compute metric
Once you have loaded a metric, you are ready to use it to evaluate a model's predictions. Provide the model predictions and references to [`~datasets.Metric.compute`]:
```py
>>> model_predictions = model(model_inputs)
>>> final_score = metric.compute(predictions=model_predictions, references=gold_references)
```
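For instance, reusing the toy labels from the usage examples above with the MRPC configuration:
```py
>>> metric = load_metric('glue', 'mrpc')
>>> final_score = metric.compute(predictions=[0, 1], references=[0, 1])
>>> print(final_score)
{'accuracy': 1.0, 'f1': 1.0}
```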
| datasets/docs/source/metrics.mdx/0 | {
"file_path": "datasets/docs/source/metrics.mdx",
"repo_id": "datasets",
"token_count": 1193
} | 67 |
# Load tabular data
A tabular dataset is a generic dataset used to describe any data stored in rows and columns, where each row represents an example and each column represents a feature (which can be continuous or categorical). These datasets are commonly stored in CSV files, Pandas DataFrames, and database tables. This guide will show you how to load and create a tabular dataset from:
- CSV files
- Pandas DataFrames
- Databases
## CSV files
🤗 Datasets can read CSV files by specifying the generic `csv` dataset builder name in the [`~datasets.load_dataset`] method. To load more than one CSV file, pass them as a list to the `data_files` parameter:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("csv", data_files="my_file.csv")
# load multiple CSV files
>>> dataset = load_dataset("csv", data_files=["my_file_1.csv", "my_file_2.csv", "my_file_3.csv"])
```
You can also map specific CSV files to the train and test splits:
```py
>>> dataset = load_dataset("csv", data_files={"train": ["my_train_file_1.csv", "my_train_file_2.csv"], "test": "my_test_file.csv"})
```
To load remote CSV files, pass the URLs instead:
```py
>>> base_url = "https://huggingface.co/datasets/lhoestq/demo1/resolve/main/data/"
>>> dataset = load_dataset('csv', data_files={"train": base_url + "train.csv", "test": base_url + "test.csv"})
```
To load zipped CSV files:
```py
>>> url = "https://domain.org/train_data.zip"
>>> data_files = {"train": url}
>>> dataset = load_dataset("csv", data_files=data_files)
```
## Pandas DataFrames
🤗 Datasets also supports loading datasets from [Pandas DataFrames](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html) with the [`~datasets.Dataset.from_pandas`] method:
```py
>>> from datasets import Dataset
>>> import pandas as pd
# create a Pandas DataFrame
>>> df = pd.read_csv("https://huggingface.co/datasets/imodels/credit-card/raw/main/train.csv")
>>> df = pd.DataFrame(df)
# load Dataset from Pandas DataFrame
>>> dataset = Dataset.from_pandas(df)
```
Use the `split` parameter to specify the name of the dataset split:
```py
>>> train_ds = Dataset.from_pandas(train_df, split="train")
>>> test_ds = Dataset.from_pandas(test_df, split="test")
```
If the dataset doesn't look as expected, you should explicitly [specify your dataset features](loading#specify-features). A [pandas.Series](https://pandas.pydata.org/docs/reference/api/pandas.Series.html) may not always carry enough information for Arrow to automatically infer a data type. For example, if a DataFrame is of length `0` or if the Series only contains `None/NaN` objects, the type is set to `null`.
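For example, here is a minimal sketch of passing explicit features to [`~datasets.Dataset.from_pandas`] (the column names are hypothetical and assume `df` has matching columns):
```py
>>> from datasets import Dataset, Features, Value
>>> features = Features({"text": Value("string"), "label": Value("int64")})
>>> dataset = Dataset.from_pandas(df, features=features)
```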
## Databases
Datasets stored in databases are typically accessed with SQL queries. With 🤗 Datasets, you can connect to a database, query for the data you need, and create a dataset out of it. Then you can use all the processing features of 🤗 Datasets to prepare your dataset for training.
### SQLite
SQLite is a small, lightweight database that is fast and easy to set up. You can use an existing database if you'd like, or follow along and start from scratch.
Start by creating a quick SQLite database with this [Covid-19 data](https://github.com/nytimes/covid-19-data/blob/master/us-states.csv) from the New York Times:
```py
>>> import sqlite3
>>> import pandas as pd
>>> conn = sqlite3.connect("us_covid_data.db")
>>> df = pd.read_csv("https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-states.csv")
>>> df.to_sql("states", conn, if_exists="replace")
```
This creates a `states` table in the `us_covid_data.db` database which you can now load into a dataset.
To connect to the database, you'll need the [URI string](https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls) that identifies your database. Connecting to a database with a URI caches the returned dataset. The URI string differs for each database dialect, so be sure to check the [Database URLs](https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls) for whichever database you're using.
For SQLite, it is:
```py
>>> uri = "sqlite:///us_covid_data.db"
```
Load the table by passing the table name and URI to [`~datasets.Dataset.from_sql`]:
```py
>>> from datasets import Dataset
>>> ds = Dataset.from_sql("states", uri)
>>> ds
Dataset({
features: ['index', 'date', 'state', 'fips', 'cases', 'deaths'],
num_rows: 54382
})
```
Then you can use all of the 🤗 Datasets processing features, such as [`~datasets.Dataset.filter`]:
```py
>>> ds.filter(lambda x: x["state"] == "California")
```
You can also load a dataset from a SQL query instead of an entire table, which is useful for querying and joining multiple tables.
Load the dataset by passing your query and URI to [`~datasets.Dataset.from_sql`]:
```py
>>> from datasets import Dataset
>>> ds = Dataset.from_sql('SELECT * FROM states WHERE state="California";', uri)
>>> ds
Dataset({
features: ['index', 'date', 'state', 'fips', 'cases', 'deaths'],
num_rows: 1019
})
```
Then you can use all of the 🤗 Datasets processing features, such as [`~datasets.Dataset.filter`]:
```py
>>> ds.filter(lambda x: x["cases"] > 10000)
```
### PostgreSQL
You can also connect and load a dataset from a PostgreSQL database, however we won't directly demonstrate how in the documentation because the example is only meant to be run in a notebook. Instead, take a look at how to install and setup a PostgreSQL server in this [notebook](https://colab.research.google.com/github/nateraw/huggingface-hub-examples/blob/main/sql_with_huggingface_datasets.ipynb#scrollTo=d83yGQMPHGFi)!
After you've set up your PostgreSQL database, you can use the [`~datasets.Dataset.from_sql`] method to load a dataset from a table or query. | datasets/docs/source/tabular_load.mdx/0 | {
"file_path": "datasets/docs/source/tabular_load.mdx",
"repo_id": "datasets",
"token_count": 1868
} | 68 |
# Copyright 2020 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BLEURT metric."""
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
"""
_DESCRIPTION = """\
BLEURT is a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project's README at https://github.com/google-research/bleurt#readme for more information.
"""
_KWARGS_DESCRIPTION = """
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
'scores': List of scores.
Examples:
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> bleurt = datasets.load_metric("bleurt")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results["scores"]])
[1.03, 1.04]
"""
CHECKPOINT_URLS = {
"bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
"bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
"bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
"bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
"bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
"bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
"BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
"BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
"BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
"BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class BLEURT(datasets.Metric):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
homepage="https://github.com/google-research/bleurt",
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"predictions": datasets.Value("string", id="sequence"),
"references": datasets.Value("string", id="sequence"),
}
),
codebase_urls=["https://github.com/google-research/bleurt"],
reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"],
)
def _download_and_prepare(self, dl_manager):
# check that config name specifies a valid BLEURT model
if self.config_name == "default":
logger.warning(
"Using default BLEURT-Base checkpoint for sequence maximum length 128. "
"You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
)
self.config_name = "bleurt-base-128"
if self.config_name.lower() in CHECKPOINT_URLS:
checkpoint_name = self.config_name.lower()
elif self.config_name.upper() in CHECKPOINT_URLS:
checkpoint_name = self.config_name.upper()
else:
raise KeyError(
f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
)
# download the model checkpoint specified by self.config_name and set up the scorer
model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))
def _compute(self, predictions, references):
scores = self.scorer.score(references=references, candidates=predictions)
return {"scores": scores}
| datasets/metrics/bleurt/bleurt.py/0 | {
"file_path": "datasets/metrics/bleurt/bleurt.py",
"repo_id": "datasets",
"token_count": 1981
} | 69 |
# Copyright 2020 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CUAD metric."""
import datasets
from .evaluate import evaluate
_CITATION = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
_DESCRIPTION = """
This metric wraps the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
_KWARGS_DESCRIPTION = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric("cuad")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CUAD(datasets.Metric):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"predictions": {
"id": datasets.Value("string"),
"prediction_text": datasets.features.Sequence(datasets.Value("string")),
},
"references": {
"id": datasets.Value("string"),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string"),
"answer_start": datasets.Value("int32"),
}
),
},
}
),
codebase_urls=["https://www.atticusprojectai.org/cuad"],
reference_urls=["https://www.atticusprojectai.org/cuad"],
)
def _compute(self, predictions, references):
pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
dataset = [
{
"paragraphs": [
{
"qas": [
{
"answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
"id": ref["id"],
}
for ref in references
]
}
]
}
]
score = evaluate(dataset=dataset, predictions=pred_dict)
return score
| datasets/metrics/cuad/cuad.py/0 | {
"file_path": "datasets/metrics/cuad/cuad.py",
"repo_id": "datasets",
"token_count": 2242
} | 70 |
# Metric Card for Mahalanobis Distance
## Metric Description
The Mahalanobis distance is the distance between a point and a distribution (as opposed to the distance between two points), making it the multivariate equivalent of the Euclidean distance.
It is often used in multivariate anomaly detection, classification on highly imbalanced datasets and one-class classification.
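For reference, the Mahalanobis distance of a point $x$ from a distribution with mean $\mu$ and covariance matrix $\Sigma$ is defined as:
$$D_M(x) = \sqrt{(x - \mu)^T \, \Sigma^{-1} \, (x - \mu)}$$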
## How to Use
At minimum, this metric requires two `list`s of datapoints:
```python
>>> mahalanobis_metric = datasets.load_metric("mahalanobis")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
```
### Inputs
- `X` (`list`): data points to be compared with the `reference_distribution`.
- `reference_distribution` (`list`): data points from the reference distribution that we want to compare to.
### Output Values
`mahalanobis` (`array`): the Mahalanobis distance for each data point in `X`.
```python
>>> print(results)
{'mahalanobis': array([0.5])}
```
#### Values from Popular Papers
*N/A*
### Example
```python
>>> mahalanobis_metric = datasets.load_metric("mahalanobis")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{'mahalanobis': array([0.5])}
```
## Limitations and Bias
The Mahalanobis distance is only able to capture linear relationships between the variables, which means it cannot capture all types of outliers. Mahalanobis distance also fails to faithfully represent data that is highly skewed or multimodal.
## Citation
```bibtex
@inproceedings{mahalanobis1936generalized,
title={On the generalized distance in statistics},
author={Mahalanobis, Prasanta Chandra},
year={1936},
organization={National Institute of Science of India}
}
```
```bibtex
@article{de2000mahalanobis,
title={The Mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
```
## Further References
- [Wikipedia -- Mahalanobis Distance](https://en.wikipedia.org/wiki/Mahalanobis_distance)
- [Machine Learning Plus -- Mahalanobis Distance](https://www.machinelearningplus.com/statistics/mahalanobis-distance/)
| datasets/metrics/mahalanobis/README.md/0 | {
"file_path": "datasets/metrics/mahalanobis/README.md",
"repo_id": "datasets",
"token_count": 738
} | 71 |
# Metric Card for Precision
## Metric Description
Precision is the fraction of correctly labeled positive examples out of all of the examples that were labeled as positive. It is computed via the equation:
Precision = TP / (TP + FP)
where TP is the True positives (i.e. the examples correctly labeled as positive) and FP is the False positive examples (i.e. the examples incorrectly labeled as positive).
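For example, if a model labels 8 examples as positive and 6 of them are truly positive (TP = 6, FP = 2), its precision is 6 / (6 + 2) = 0.75.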
## How to Use
At minimum, precision takes as input a list of predicted labels, `predictions`, and a list of output labels, `references`.
```python
>>> precision_metric = datasets.load_metric("precision")
>>> results = precision_metric.compute(references=[0, 1], predictions=[0, 1])
>>> print(results)
{'precision': 1.0}
```
### Inputs
- **predictions** (`list` of `int`): Predicted class labels.
- **references** (`list` of `int`): Actual class labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`. If `average` is `None`, it should be the label order. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights. Defaults to None.
- **zero_division** (`0`, `1` or `'warn'`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- 0: Returns 0 when there is a zero division.
- 1: Returns 1 when there is a zero division.
- 'warn': Raises warnings and then returns 0 when there is a zero division.
### Output Values
- **precision**(`float` or `array` of `float`): Precision score or list of precision scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate that fewer negative examples were incorrectly labeled as positive, which means that, generally, higher scores are better.
Output Example(s):
```python
{'precision': 0.2222222222222222}
```
```python
{'precision': array([0.66666667, 0.0, 0.0])}
```
#### Values from Popular Papers
### Examples
Example 1-A simple binary example
```python
>>> precision_metric = datasets.load_metric("precision")
>>> results = precision_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{'precision': 0.5}
```
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
```python
>>> precision_metric = datasets.load_metric("precision")
>>> results = precision_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results['precision'], 2))
0.67
```
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
```python
>>> precision_metric = datasets.load_metric("precision")
>>> results = precision_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(results)
{'precision': 0.23529411764705882}
```
Example 4-A multiclass example, with different values for the `average` input.
```python
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = precision_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'precision': 0.2222222222222222}
>>> results = precision_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'precision': 0.3333333333333333}
>>> results = precision_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'precision': 0.2222222222222222}
>>> results = precision_metric.compute(predictions=predictions, references=references, average=None)
>>> print([round(res, 2) for res in results['precision']])
[0.67, 0.0, 0.0]
```
## Limitations and Bias
[Precision](https://huggingface.co/metrics/precision) and [recall](https://huggingface.co/metrics/recall) are complementary and can be used to measure different aspects of model performance -- using both of them (or an averaged measure like [F1 score](https://huggingface.co/metrics/F1)) better represents the different aspects of performance. See [Wikipedia](https://en.wikipedia.org/wiki/Precision_and_recall) for more information.
## Citation(s)
```bibtex
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
```
## Further References
- [Wikipedia -- Precision and recall](https://en.wikipedia.org/wiki/Precision_and_recall)
| datasets/metrics/precision/README.md/0 | {
"file_path": "datasets/metrics/precision/README.md",
"repo_id": "datasets",
"token_count": 1878
} | 72 |
# Metric Card for SQuAD
## Metric description
This metric wraps the official scoring script for version 1 of the [Stanford Question Answering Dataset (SQuAD)](https://huggingface.co/datasets/squad).
SQuAD is a reading comprehension dataset, consisting of questions posed by crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the corresponding reading passage, or the question might be unanswerable.
## How to use
The metric takes two files or two lists of question-answer dictionaries as inputs: one with the predictions of the model and the other with the references to be compared to:
```python
from datasets import load_metric
squad_metric = load_metric("squad")
results = squad_metric.compute(predictions=predictions, references=references)
```
## Output values
This metric outputs a dictionary with two values: the average exact match score and the average [F1 score](https://huggingface.co/metrics/f1).
```
{'exact_match': 100.0, 'f1': 100.0}
```
The range of `exact_match` is 0-100, where 0.0 means no answers were matched and 100.0 means all answers were matched.
The range of `f1` is 0-100 -- its lowest possible value is 0, if either the precision or the recall is 0, and its highest possible value is 100.0, which means perfect precision and recall.
### Values from popular papers
The [original SQuAD paper](https://nlp.stanford.edu/pubs/rajpurkar2016squad.pdf) reported an F1 score of 51.0% and an Exact Match score of 40.0%. They also report that human performance on the dataset represents an F1 score of 90.5% and an Exact Match score of 80.3%.
For more recent model performance, see the [dataset leaderboard](https://paperswithcode.com/dataset/squad).
## Examples
Maximal values for both exact match and F1 (perfect match):
```python
from datasets import load_metric
squad_metric = load_metric("squad")
predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
results = squad_metric.compute(predictions=predictions, references=references)
results
{'exact_match': 100.0, 'f1': 100.0}
```
Minimal values for both exact match and F1 (no match):
```python
from datasets import load_metric
squad_metric = load_metric("squad")
predictions = [{'prediction_text': '1999', 'id': '56e10a3be3433e1400422b22'}]
references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
results = squad_metric.compute(predictions=predictions, references=references)
results
{'exact_match': 0.0, 'f1': 0.0}
```
Partial match (2 out of 3 answers correct) :
```python
from datasets import load_metric
squad_metric = load_metric("squad")
predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}, {'prediction_text': 'Beyonce', 'id': '56d2051ce7d4791d0090260b'}, {'prediction_text': 'climate change', 'id': '5733b5344776f419006610e1'}]
references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}, {'answers': {'answer_start': [233], 'text': ['Beyoncé and Bruno Mars']}, 'id': '56d2051ce7d4791d0090260b'}, {'answers': {'answer_start': [891], 'text': ['climate change']}, 'id': '5733b5344776f419006610e1'}]
results = squad_metric.compute(predictions=predictions, references=references)
results
{'exact_match': 66.66666666666667, 'f1': 66.66666666666667}
```
## Limitations and bias
This metric works only with datasets that have the same format as [SQuAD v.1 dataset](https://huggingface.co/datasets/squad).
The SQuAD dataset does contain a certain amount of noise, such as duplicate questions as well as missing answers, but these represent a minority of the 100,000 question-answer pairs. Also, neither exact match nor F1 score reflect whether models do better on certain types of questions (e.g. who questions) or those that cover a certain gender or geographical area -- carrying out more in-depth error analysis can complement these numbers.
## Citation
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
## Further References
- [The Stanford Question Answering Dataset: Background, Challenges, Progress (blog post)](https://rajpurkar.github.io/mlx/qa-and-squad/)
- [Hugging Face Course -- Question Answering](https://huggingface.co/course/chapter7/7)
| datasets/metrics/squad/README.md/0 | {
"file_path": "datasets/metrics/squad/README.md",
"repo_id": "datasets",
"token_count": 1494
} | 73 |
# Copyright 2020 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""XNLI benchmark metric."""
import datasets
_CITATION = """\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
"""
_DESCRIPTION = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""
_KWARGS_DESCRIPTION = """
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
'accuracy': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
"""
def simple_accuracy(preds, labels):
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
"references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
}
),
codebase_urls=[],
reference_urls=[],
format="numpy",
)
def _compute(self, predictions, references):
return {"accuracy": simple_accuracy(predictions, references)}
| datasets/metrics/xnli/xnli.py/0 | {
"file_path": "datasets/metrics/xnli/xnli.py",
"repo_id": "datasets",
"token_count": 1107
} | 74 |
import fnmatch
import json
import os
import shutil
import tempfile
import xml.etree.ElementTree as ET
from argparse import ArgumentParser
from pathlib import Path
from typing import Optional
from datasets import config
from datasets.commands import BaseDatasetsCLICommand
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.download.mock_download_manager import MockDownloadManager
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.deprecation_utils import deprecated
from datasets.utils.logging import get_logger, set_verbosity_warning
from datasets.utils.py_utils import map_nested
logger = get_logger(__name__)
DEFAULT_ENCODING = "utf-8"
def dummy_data_command_factory(args):
return DummyDataCommand(
args.path_to_dataset,
args.auto_generate,
args.n_lines,
args.json_field,
args.xml_tag,
args.match_text_files,
args.keep_uncompressed,
args.cache_dir,
args.encoding,
)
class DummyDataGeneratorDownloadManager(DownloadManager):
def __init__(self, mock_download_manager, *args, **kwargs):
super().__init__(*args, **kwargs)
self.mock_download_manager = mock_download_manager
self.downloaded_dummy_paths = []
self.expected_dummy_paths = []
def download(self, url_or_urls):
output = super().download(url_or_urls)
dummy_output = self.mock_download_manager.download(url_or_urls)
map_nested(self.downloaded_dummy_paths.append, output, map_tuple=True)
map_nested(self.expected_dummy_paths.append, dummy_output, map_tuple=True)
return output
def download_and_extract(self, url_or_urls):
output = super().extract(super().download(url_or_urls))
dummy_output = self.mock_download_manager.download(url_or_urls)
map_nested(self.downloaded_dummy_paths.append, output, map_tuple=True)
map_nested(self.expected_dummy_paths.append, dummy_output, map_tuple=True)
return output
def auto_generate_dummy_data_folder(
self,
n_lines: int = 5,
json_field: Optional[str] = None,
xml_tag: Optional[str] = None,
match_text_files: Optional[str] = None,
encoding: Optional[str] = None,
) -> bool:
os.makedirs(
os.path.join(
self.mock_download_manager.datasets_scripts_dir,
self.mock_download_manager.dataset_name,
self.mock_download_manager.dummy_data_folder,
"dummy_data",
),
exist_ok=True,
)
total = 0
self.mock_download_manager.load_existing_dummy_data = False
for src_path, relative_dst_path in zip(self.downloaded_dummy_paths, self.expected_dummy_paths):
dst_path = os.path.join(
self.mock_download_manager.datasets_scripts_dir,
self.mock_download_manager.dataset_name,
self.mock_download_manager.dummy_data_folder,
relative_dst_path,
)
total += self._create_dummy_data(
src_path,
dst_path,
n_lines=n_lines,
json_field=json_field,
xml_tag=xml_tag,
match_text_files=match_text_files,
encoding=encoding,
)
if total == 0:
logger.error(
"Dummy data generation failed: no dummy files were created. "
"Make sure the data files format is supported by the auto-generation."
)
return total > 0
def _create_dummy_data(
self,
src_path: str,
dst_path: str,
n_lines: int,
json_field: Optional[str] = None,
xml_tag: Optional[str] = None,
match_text_files: Optional[str] = None,
encoding: Optional[str] = None,
) -> int:
encoding = encoding or DEFAULT_ENCODING
if os.path.isfile(src_path):
logger.debug(f"Trying to generate dummy data file {dst_path}")
dst_path_extensions = Path(dst_path).suffixes
line_by_line_extensions = [".txt", ".csv", ".jsonl", ".tsv"]
is_line_by_line_text_file = any(extension in dst_path_extensions for extension in line_by_line_extensions)
if match_text_files is not None:
file_name = os.path.basename(dst_path)
for pattern in match_text_files.split(","):
is_line_by_line_text_file |= fnmatch.fnmatch(file_name, pattern)
# Line by line text file (txt, csv etc.)
if is_line_by_line_text_file:
Path(dst_path).parent.mkdir(exist_ok=True, parents=True)
with open(src_path, encoding=encoding) as src_file:
with open(dst_path, "w", encoding=encoding) as dst_file:
first_lines = []
for i, line in enumerate(src_file):
if i >= n_lines:
break
first_lines.append(line)
dst_file.write("".join(first_lines).strip())
return 1
# json file
elif ".json" in dst_path_extensions:
with open(src_path, encoding=encoding) as src_file:
json_data = json.load(src_file)
if json_field is not None:
json_data = json_data[json_field]
if isinstance(json_data, dict):
if not all(isinstance(v, list) for v in json_data.values()):
raise ValueError(
f"Couldn't parse columns {list(json_data.keys())}. "
"Maybe specify which json field must be used "
"to read the data with --json_field <my_field>."
)
first_json_data = {k: v[:n_lines] for k, v in json_data.items()}
else:
first_json_data = json_data[:n_lines]
if json_field is not None:
first_json_data = {json_field: first_json_data}
Path(dst_path).parent.mkdir(exist_ok=True, parents=True)
with open(dst_path, "w", encoding=encoding) as dst_file:
json.dump(first_json_data, dst_file)
return 1
# xml file
elif any(extension in dst_path_extensions for extension in [".xml", ".txm"]):
if xml_tag is None:
logger.warning("Found xml file but 'xml_tag' is set to None. Please provide --xml_tag")
else:
self._create_xml_dummy_data(src_path, dst_path, xml_tag, n_lines=n_lines, encoding=encoding)
return 1
logger.warning(
f"Couldn't generate dummy file '{dst_path}'. " "Ignore that if this file is not useful for dummy data."
)
return 0
# directory, iterate through all files
elif os.path.isdir(src_path):
total = 0
for path, _, files in os.walk(src_path):
for name in files:
if not name.startswith("."): # ignore files like .DS_Store etc.
src_file_path = os.path.join(path, name)
dst_file_path = os.path.join(dst_path, Path(src_file_path).relative_to(src_path))
total += self._create_dummy_data(
src_file_path,
dst_file_path,
n_lines=n_lines,
json_field=json_field,
xml_tag=xml_tag,
match_text_files=match_text_files,
encoding=encoding,
)
return total
@staticmethod
def _create_xml_dummy_data(src_path, dst_path, xml_tag, n_lines=5, encoding=DEFAULT_ENCODING):
Path(dst_path).parent.mkdir(exist_ok=True, parents=True)
with open(src_path, encoding=encoding) as src_file:
n_line = 0
parents = []
for event, elem in ET.iterparse(src_file, events=("start", "end")):
if event == "start":
parents.append(elem)
else:
_ = parents.pop()
if elem.tag == xml_tag:
if n_line < n_lines:
n_line += 1
else:
if parents:
parents[-1].remove(elem)
ET.ElementTree(element=elem).write(dst_path, encoding=encoding)
def compress_autogenerated_dummy_data(self, path_to_dataset):
root_dir = os.path.join(path_to_dataset, self.mock_download_manager.dummy_data_folder)
base_name = os.path.join(root_dir, "dummy_data")
base_dir = "dummy_data"
logger.info(f"Compressing dummy data folder to '{base_name}.zip'")
shutil.make_archive(base_name, "zip", root_dir, base_dir)
shutil.rmtree(base_name)
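# A minimal illustrative sketch of the archive layout produced above: the `_example_*`
# helper is hypothetical (not used by the library) and the paths are made up. With
# `shutil.make_archive(base_name, "zip", root_dir, base_dir)`, the resulting
# "<base_name>.zip" contains every entry rooted under "<base_dir>/".
def _example_dummy_data_zip_layout(tmp_root: str) -> list:
    import os
    import shutil
    import zipfile

    data_dir = os.path.join(tmp_root, "dummy_data")
    os.makedirs(data_dir, exist_ok=True)
    with open(os.path.join(data_dir, "train.txt"), "w", encoding="utf-8") as f:
        f.write("example line\n")
    # Same call pattern as compress_autogenerated_dummy_data: archive "dummy_data" relative to its parent.
    base_name = os.path.join(tmp_root, "dummy_data")
    shutil.make_archive(base_name, "zip", tmp_root, "dummy_data")
    with zipfile.ZipFile(base_name + ".zip") as zf:
        return zf.namelist()  # e.g. ["dummy_data/", "dummy_data/train.txt"]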
@deprecated(
"The `datasets` repository does not host the dataset scripts anymore. Therefore, dummy data is no longer needed to test their loading with CI."
)
class DummyDataCommand(BaseDatasetsCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
test_parser = parser.add_parser("dummy_data", help="Generate dummy data.")
test_parser.add_argument("--auto_generate", action="store_true", help="Automatically generate dummy data")
test_parser.add_argument(
"--n_lines", type=int, default=5, help="Number of lines or samples to keep when auto-generating dummy data"
)
test_parser.add_argument(
"--json_field",
type=str,
default=None,
help="Optional, json field to read the data from when auto-generating dummy data. In the json data files, this field must point to a list of samples as json objects (ex: the 'data' field for squad-like files)",
)
test_parser.add_argument(
"--xml_tag",
type=str,
default=None,
help="Optional, xml tag name of the samples inside the xml files when auto-generating dummy data.",
)
test_parser.add_argument(
"--match_text_files",
type=str,
default=None,
help="Optional, a comma separated list of file patterns that looks for line-by-line text files other than *.txt or *.csv. Example: --match_text_files *.label",
)
test_parser.add_argument(
"--keep_uncompressed",
action="store_true",
help="Whether to leave the dummy data folders uncompressed when auto-generating dummy data. Useful for debugging for to do manual adjustements before compressing.",
)
test_parser.add_argument(
"--cache_dir",
type=str,
default=None,
help="Cache directory to download and cache files when auto-generating dummy data",
)
test_parser.add_argument(
"--encoding",
type=str,
default=None,
help=f"Encoding to use when auto-generating dummy data. Defaults to {DEFAULT_ENCODING}",
)
test_parser.add_argument("path_to_dataset", type=str, help="Path to the dataset (example: ./datasets/squad)")
test_parser.set_defaults(func=dummy_data_command_factory)
def __init__(
self,
path_to_dataset: str,
auto_generate: bool,
n_lines: int,
json_field: Optional[str],
xml_tag: Optional[str],
match_text_files: Optional[str],
keep_uncompressed: bool,
cache_dir: Optional[str],
encoding: Optional[str],
):
self._path_to_dataset = path_to_dataset
if os.path.isdir(path_to_dataset):
self._dataset_name = path_to_dataset.replace(os.sep, "/").split("/")[-1]
else:
self._dataset_name = path_to_dataset.replace(os.sep, "/").split("/")[-2]
cache_dir = os.path.expanduser(cache_dir or config.HF_DATASETS_CACHE)
self._auto_generate = auto_generate
self._n_lines = n_lines
self._json_field = json_field
self._xml_tag = xml_tag
self._match_text_files = match_text_files
self._keep_uncompressed = keep_uncompressed
self._cache_dir = cache_dir
self._encoding = encoding
def run(self):
set_verbosity_warning()
dataset_module = dataset_module_factory(self._path_to_dataset)
builder_cls = import_main_class(dataset_module.module_path)
# use `None` as config if no configs
builder_configs = builder_cls.BUILDER_CONFIGS or [None]
auto_generate_results = []
with tempfile.TemporaryDirectory() as tmp_dir:
for builder_config in builder_configs:
config_name = builder_config.name if builder_config else None
dataset_builder = builder_cls(config_name=config_name, hash=dataset_module.hash, cache_dir=tmp_dir)
version = builder_config.version if builder_config else dataset_builder.config.version
mock_dl_manager = MockDownloadManager(
dataset_name=self._dataset_name,
config=builder_config,
version=version,
use_local_dummy_data=True,
load_existing_dummy_data=False,
)
if self._auto_generate:
auto_generate_results.append(
self._autogenerate_dummy_data(
dataset_builder=dataset_builder,
mock_dl_manager=mock_dl_manager,
keep_uncompressed=self._keep_uncompressed,
)
)
else:
self._print_dummy_data_instructions(
dataset_builder=dataset_builder, mock_dl_manager=mock_dl_manager
)
if self._auto_generate and not self._keep_uncompressed:
if all(auto_generate_results):
print(f"Automatic dummy data generation succeeded for all configs of '{self._path_to_dataset}'")
else:
print(f"Automatic dummy data generation failed for some configs of '{self._path_to_dataset}'")
def _autogenerate_dummy_data(self, dataset_builder, mock_dl_manager, keep_uncompressed) -> Optional[bool]:
dl_cache_dir = (
os.path.join(self._cache_dir, config.DOWNLOADED_DATASETS_DIR)
if self._cache_dir
else config.DOWNLOADED_DATASETS_PATH
)
download_config = DownloadConfig(cache_dir=dl_cache_dir)
dl_manager = DummyDataGeneratorDownloadManager(
dataset_name=self._dataset_name, mock_download_manager=mock_dl_manager, download_config=download_config
)
dataset_builder._split_generators(dl_manager)
mock_dl_manager.load_existing_dummy_data = False # don't use real dummy data
dl_manager.auto_generate_dummy_data_folder(
n_lines=self._n_lines,
json_field=self._json_field,
xml_tag=self._xml_tag,
match_text_files=self._match_text_files,
encoding=self._encoding,
)
if not keep_uncompressed:
path_to_dataset = os.path.join(mock_dl_manager.datasets_scripts_dir, mock_dl_manager.dataset_name)
dl_manager.compress_autogenerated_dummy_data(path_to_dataset)
# now test that the dummy_data.zip file actually works
mock_dl_manager.load_existing_dummy_data = True # use real dummy data
n_examples_per_split = {}
os.makedirs(dataset_builder._cache_dir, exist_ok=True)
try:
split_generators = dataset_builder._split_generators(mock_dl_manager)
for split_generator in split_generators:
dataset_builder._prepare_split(split_generator, check_duplicate_keys=False)
n_examples_per_split[split_generator.name] = split_generator.split_info.num_examples
except OSError as e:
logger.error(
f"Failed to load dummy data for config '{dataset_builder.config.name}''.\nOriginal error:\n"
+ str(e)
)
return False
else:
if all(n_examples > 0 for n_examples in n_examples_per_split.values()):
logger.warning(
f"Dummy data generation done and dummy data test succeeded for config '{dataset_builder.config.name}''."
)
return True
else:
empty_splits = [
split_name for split_name in n_examples_per_split if n_examples_per_split[split_name] == 0
]
logger.warning(
f"Dummy data generation done but dummy data test failed since splits {empty_splits} have 0 examples for config '{dataset_builder.config.name}''."
)
return False
else:
generated_dummy_data_dir = os.path.join(self._path_to_dataset, mock_dl_manager.dummy_data_folder)
logger.info(
f"Dummy data generated in directory '{generated_dummy_data_dir}' but kept uncompressed. "
"Please compress this directory into a zip file to use it for dummy data tests."
)
def _print_dummy_data_instructions(self, dataset_builder, mock_dl_manager):
dummy_data_folder = os.path.join(self._path_to_dataset, mock_dl_manager.dummy_data_folder)
logger.info(f"Creating dummy folder structure for {dummy_data_folder}... ")
os.makedirs(dummy_data_folder, exist_ok=True)
try:
generator_splits = dataset_builder._split_generators(mock_dl_manager)
except FileNotFoundError as e:
print(
f"Dataset {self._dataset_name} with config {mock_dl_manager.config} seems to already open files in the method `_split_generators(...)`. You might consider to instead only open files in the method `_generate_examples(...)` instead. If this is not possible the dummy data has to be created with less guidance. Make sure you create the file {e.filename}."
)
files_to_create = set()
split_names = []
dummy_file_name = mock_dl_manager.dummy_file_name
for split in generator_splits:
logger.info(f"Collecting dummy data file paths to create for {split.name}")
split_names.append(split.name)
gen_kwargs = split.gen_kwargs
generator = dataset_builder._generate_examples(**gen_kwargs)
try:
dummy_data_guidance_print = "\n" + 30 * "=" + "DUMMY DATA INSTRUCTIONS" + 30 * "=" + "\n"
config_string = (
f"config {mock_dl_manager.config.name} of " if mock_dl_manager.config is not None else ""
)
dummy_data_guidance_print += (
"- In order to create the dummy data for "
+ config_string
+ f"{self._dataset_name}, please go into the folder '{dummy_data_folder}' with `cd {dummy_data_folder}` . \n\n"
)
# trigger generate function
for key, record in generator:
pass
dummy_data_guidance_print += f"- It appears that the function `_generate_examples(...)` expects one or more files in the folder {dummy_file_name} using the function `glob.glob(...)`. In this case, please refer to the `_generate_examples(...)` method to see under which filename the dummy data files should be created. \n\n"
except FileNotFoundError as e:
files_to_create.add(e.filename)
split_names = ", ".join(split_names)
if len(files_to_create) > 0:
# no glob.glob(...) in `_generate_examples(...)`
if len(files_to_create) == 1 and next(iter(files_to_create)) == dummy_file_name:
dummy_data_guidance_print += f"- Please create a single dummy data file called '{next(iter(files_to_create))}' from the folder '{dummy_data_folder}'. Make sure that the dummy data file provides at least one example for the split(s) '{split_names}' \n\n"
files_string = dummy_file_name
else:
files_string = ", ".join(files_to_create)
dummy_data_guidance_print += f"- Please create the following dummy data files '{files_string}' from the folder '{dummy_data_folder}'\n\n"
dummy_data_guidance_print += f"- For each of the splits '{split_names}', make sure that one or more of the dummy data files provide at least one example \n\n"
dummy_data_guidance_print += f"- If the method `_generate_examples(...)` includes multiple `open()` statements, you might have to create other files in addition to '{files_string}'. In this case please refer to the `_generate_examples(...)` method \n\n"
if len(files_to_create) == 1 and next(iter(files_to_create)) == dummy_file_name:
dummy_data_guidance_print += f"- After the dummy data file is created, it should be zipped to '{dummy_file_name}.zip' with the command `zip {dummy_file_name}.zip {dummy_file_name}` \n\n"
dummy_data_guidance_print += (
f"- You can now delete the file '{dummy_file_name}' with the command `rm {dummy_file_name}` \n\n"
)
dummy_data_guidance_print += f"- To get the file '{dummy_file_name}' back for further changes to the dummy data, simply unzip {dummy_file_name}.zip with the command `unzip {dummy_file_name}.zip` \n\n"
else:
dummy_data_guidance_print += f"- After all dummy data files are created, they should be zipped recursively to '{dummy_file_name}.zip' with the command `zip -r {dummy_file_name}.zip {dummy_file_name}/` \n\n"
dummy_data_guidance_print += (
f"- You can now delete the folder '{dummy_file_name}' with the command `rm -r {dummy_file_name}` \n\n"
)
dummy_data_guidance_print += f"- To get the folder '{dummy_file_name}' back for further changes to the dummy data, simply unzip {dummy_file_name}.zip with the command `unzip {dummy_file_name}.zip` \n\n"
dummy_data_guidance_print += (
f"- Make sure you have created the file '{dummy_file_name}.zip' in '{dummy_data_folder}' \n"
)
dummy_data_guidance_print += 83 * "=" + "\n"
print(dummy_data_guidance_print)
| datasets/src/datasets/commands/dummy_data.py/0 | {
"file_path": "datasets/src/datasets/commands/dummy_data.py",
"repo_id": "datasets",
"token_count": 11107
} | 75 |
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""This class handle features definition in datasets and some utilities to display table type."""
import copy
import json
import re
import sys
from collections.abc import Iterable, Mapping
from collections.abc import Sequence as SequenceABC
from dataclasses import InitVar, dataclass, field, fields
from functools import reduce, wraps
from operator import mul
from typing import Any, Callable, ClassVar, Dict, List, Optional, Tuple, Union
from typing import Sequence as Sequence_
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.compute as pc
import pyarrow.types
import pyarrow_hotfix # noqa: F401 # to fix vulnerability on pyarrow<14.0.1
from pandas.api.extensions import ExtensionArray as PandasExtensionArray
from pandas.api.extensions import ExtensionDtype as PandasExtensionDtype
from .. import config
from ..naming import camelcase_to_snakecase, snakecase_to_camelcase
from ..table import array_cast
from ..utils import experimental, logging
from ..utils.py_utils import asdict, first_non_null_value, zip_dict
from .audio import Audio
from .image import Image, encode_pil_image
from .translation import Translation, TranslationVariableLanguages
logger = logging.get_logger(__name__)
def _arrow_to_datasets_dtype(arrow_type: pa.DataType) -> str:
"""
_arrow_to_datasets_dtype takes a pyarrow.DataType and converts it to a datasets string dtype.
In effect, `dt == string_to_arrow(_arrow_to_datasets_dtype(dt))`
"""
if pyarrow.types.is_null(arrow_type):
return "null"
elif pyarrow.types.is_boolean(arrow_type):
return "bool"
elif pyarrow.types.is_int8(arrow_type):
return "int8"
elif pyarrow.types.is_int16(arrow_type):
return "int16"
elif pyarrow.types.is_int32(arrow_type):
return "int32"
elif pyarrow.types.is_int64(arrow_type):
return "int64"
elif pyarrow.types.is_uint8(arrow_type):
return "uint8"
elif pyarrow.types.is_uint16(arrow_type):
return "uint16"
elif pyarrow.types.is_uint32(arrow_type):
return "uint32"
elif pyarrow.types.is_uint64(arrow_type):
return "uint64"
elif pyarrow.types.is_float16(arrow_type):
return "float16" # pyarrow dtype is "halffloat"
elif pyarrow.types.is_float32(arrow_type):
return "float32" # pyarrow dtype is "float"
elif pyarrow.types.is_float64(arrow_type):
return "float64" # pyarrow dtype is "double"
elif pyarrow.types.is_time32(arrow_type):
return f"time32[{pa.type_for_alias(str(arrow_type)).unit}]"
elif pyarrow.types.is_time64(arrow_type):
return f"time64[{pa.type_for_alias(str(arrow_type)).unit}]"
elif pyarrow.types.is_timestamp(arrow_type):
if arrow_type.tz is None:
return f"timestamp[{arrow_type.unit}]"
elif arrow_type.tz:
return f"timestamp[{arrow_type.unit}, tz={arrow_type.tz}]"
else:
raise ValueError(f"Unexpected timestamp object {arrow_type}.")
elif pyarrow.types.is_date32(arrow_type):
return "date32" # pyarrow dtype is "date32[day]"
elif pyarrow.types.is_date64(arrow_type):
return "date64" # pyarrow dtype is "date64[ms]"
elif pyarrow.types.is_duration(arrow_type):
return f"duration[{arrow_type.unit}]"
elif pyarrow.types.is_decimal128(arrow_type):
return f"decimal128({arrow_type.precision}, {arrow_type.scale})"
elif pyarrow.types.is_decimal256(arrow_type):
return f"decimal256({arrow_type.precision}, {arrow_type.scale})"
elif pyarrow.types.is_binary(arrow_type):
return "binary"
elif pyarrow.types.is_large_binary(arrow_type):
return "large_binary"
elif pyarrow.types.is_string(arrow_type):
return "string"
elif pyarrow.types.is_large_string(arrow_type):
return "large_string"
else:
raise ValueError(f"Arrow type {arrow_type} does not have a datasets dtype equivalent.")
def string_to_arrow(datasets_dtype: str) -> pa.DataType:
"""
string_to_arrow takes a datasets string dtype and converts it to a pyarrow.DataType.
In effect, `dt == string_to_arrow(_arrow_to_datasets_dtype(dt))`
This is necessary because the datasets.Value() primitive type is constructed using a string dtype
Value(dtype=str)
But Features.type (via `get_nested_type()` expects to resolve Features into a pyarrow Schema,
which means that each Value() must be able to resolve into a corresponding pyarrow.DataType, which is the
purpose of this function.
"""
def _dtype_error_msg(dtype, pa_dtype, examples=None, urls=None):
msg = f"{dtype} is not a validly formatted string representation of the pyarrow {pa_dtype} type."
if examples:
examples = ", ".join(examples[:-1]) + " or " + examples[-1] if len(examples) > 1 else examples[0]
msg += f"\nValid examples include: {examples}."
if urls:
urls = ", ".join(urls[:-1]) + " and " + urls[-1] if len(urls) > 1 else urls[0]
msg += f"\nFor more insformation, see: {urls}."
return msg
if datasets_dtype in pa.__dict__:
return pa.__dict__[datasets_dtype]()
if (datasets_dtype + "_") in pa.__dict__:
return pa.__dict__[datasets_dtype + "_"]()
timestamp_matches = re.search(r"^timestamp\[(.*)\]$", datasets_dtype)
if timestamp_matches:
timestamp_internals = timestamp_matches.group(1)
internals_matches = re.search(r"^(s|ms|us|ns),\s*tz=([a-zA-Z0-9/_+\-:]*)$", timestamp_internals)
if timestamp_internals in ["s", "ms", "us", "ns"]:
return pa.timestamp(timestamp_internals)
elif internals_matches:
return pa.timestamp(internals_matches.group(1), internals_matches.group(2))
else:
raise ValueError(
_dtype_error_msg(
datasets_dtype,
"timestamp",
examples=["timestamp[us]", "timestamp[us, tz=America/New_York"],
urls=["https://arrow.apache.org/docs/python/generated/pyarrow.timestamp.html"],
)
)
duration_matches = re.search(r"^duration\[(.*)\]$", datasets_dtype)
if duration_matches:
duration_internals = duration_matches.group(1)
if duration_internals in ["s", "ms", "us", "ns"]:
return pa.duration(duration_internals)
else:
raise ValueError(
_dtype_error_msg(
datasets_dtype,
"duration",
examples=["duration[s]", "duration[us]"],
urls=["https://arrow.apache.org/docs/python/generated/pyarrow.duration.html"],
)
)
time_matches = re.search(r"^time(.*)\[(.*)\]$", datasets_dtype)
if time_matches:
time_internals_bits = time_matches.group(1)
if time_internals_bits == "32":
time_internals_unit = time_matches.group(2)
if time_internals_unit in ["s", "ms"]:
return pa.time32(time_internals_unit)
else:
raise ValueError(
f"{time_internals_unit} is not a valid unit for the pyarrow time32 type. Supported units: s (second) and ms (millisecond)."
)
elif time_internals_bits == "64":
time_internals_unit = time_matches.group(2)
if time_internals_unit in ["us", "ns"]:
return pa.time64(time_internals_unit)
else:
raise ValueError(
f"{time_internals_unit} is not a valid unit for the pyarrow time64 type. Supported units: us (microsecond) and ns (nanosecond)."
)
else:
raise ValueError(
_dtype_error_msg(
datasets_dtype,
"time",
examples=["time32[s]", "time64[us]"],
urls=[
"https://arrow.apache.org/docs/python/generated/pyarrow.time32.html",
"https://arrow.apache.org/docs/python/generated/pyarrow.time64.html",
],
)
)
decimal_matches = re.search(r"^decimal(.*)\((.*)\)$", datasets_dtype)
if decimal_matches:
decimal_internals_bits = decimal_matches.group(1)
if decimal_internals_bits == "128":
decimal_internals_precision_and_scale = re.search(r"^(\d+),\s*(-?\d+)$", decimal_matches.group(2))
if decimal_internals_precision_and_scale:
precision = decimal_internals_precision_and_scale.group(1)
scale = decimal_internals_precision_and_scale.group(2)
return pa.decimal128(int(precision), int(scale))
else:
raise ValueError(
_dtype_error_msg(
datasets_dtype,
"decimal128",
examples=["decimal128(10, 2)", "decimal128(4, -2)"],
urls=["https://arrow.apache.org/docs/python/generated/pyarrow.decimal128.html"],
)
)
elif decimal_internals_bits == "256":
decimal_internals_precision_and_scale = re.search(r"^(\d+),\s*(-?\d+)$", decimal_matches.group(2))
if decimal_internals_precision_and_scale:
precision = decimal_internals_precision_and_scale.group(1)
scale = decimal_internals_precision_and_scale.group(2)
return pa.decimal256(int(precision), int(scale))
else:
raise ValueError(
_dtype_error_msg(
datasets_dtype,
"decimal256",
examples=["decimal256(30, 2)", "decimal256(38, -4)"],
urls=["https://arrow.apache.org/docs/python/generated/pyarrow.decimal256.html"],
)
)
else:
raise ValueError(
_dtype_error_msg(
datasets_dtype,
"decimal",
examples=["decimal128(12, 3)", "decimal256(40, 6)"],
urls=[
"https://arrow.apache.org/docs/python/generated/pyarrow.decimal128.html",
"https://arrow.apache.org/docs/python/generated/pyarrow.decimal256.html",
],
)
)
raise ValueError(
f"Neither {datasets_dtype} nor {datasets_dtype + '_'} seems to be a pyarrow data type. "
f"Please make sure to use a correct data type, see: "
f"https://arrow.apache.org/docs/python/api/datatypes.html#factory-functions"
)
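# A minimal illustrative sketch (hypothetical helper, not used by the library) of the
# round-trip property stated in the docstrings above: for supported pyarrow types,
# `string_to_arrow(_arrow_to_datasets_dtype(dt)) == dt`.
def _example_dtype_round_trip() -> bool:
    some_types = [
        pa.int32(),
        pa.float64(),
        pa.string(),
        pa.timestamp("us", tz="America/New_York"),
        pa.decimal128(10, 2),
    ]
    return all(string_to_arrow(_arrow_to_datasets_dtype(dt)) == dt for dt in some_types)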
def _cast_to_python_objects(obj: Any, only_1d_for_numpy: bool, optimize_list_casting: bool) -> Tuple[Any, bool]:
"""
Cast pytorch/tensorflow/pandas objects to python numpy array/lists.
It works recursively.
If `optimize_list_casting` is True, to avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be casted.
If the first element needs to be casted, then all the elements of the list will be casted, otherwise they'll stay the same.
This trick makes it possible to cast objects that contain tokenizer outputs, for example, without iterating over every single token.
Args:
obj: the object (nested struct) to cast.
only_1d_for_numpy (bool): whether to keep the full multi-dim tensors as multi-dim numpy arrays, or convert them to
nested lists of 1-dimensional numpy arrays. This can be useful to keep only 1-d arrays to instantiate Arrow arrays.
Indeed Arrow only supports converting 1-dimensional array values.
optimize_list_casting (bool): whether to optimize list casting by checking the first non-null element to see if it needs to be casted
and if it doesn't, not checking the rest of the list elements.
Returns:
casted_obj: the casted object
has_changed (bool): True if the object has been changed, False if it is identical
"""
if config.TF_AVAILABLE and "tensorflow" in sys.modules:
import tensorflow as tf
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if config.JAX_AVAILABLE and "jax" in sys.modules:
import jax.numpy as jnp
if config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(obj, np.ndarray):
if obj.ndim == 0:
return obj[()], True
elif not only_1d_for_numpy or obj.ndim == 1:
return obj, False
else:
return (
[
_cast_to_python_objects(
x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)[0]
for x in obj
],
True,
)
elif config.TORCH_AVAILABLE and "torch" in sys.modules and isinstance(obj, torch.Tensor):
if obj.ndim == 0:
return obj.detach().cpu().numpy()[()], True
elif not only_1d_for_numpy or obj.ndim == 1:
return obj.detach().cpu().numpy(), True
else:
return (
[
_cast_to_python_objects(
x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)[0]
for x in obj.detach().cpu().numpy()
],
True,
)
elif config.TF_AVAILABLE and "tensorflow" in sys.modules and isinstance(obj, tf.Tensor):
if obj.ndim == 0:
return obj.numpy()[()], True
elif not only_1d_for_numpy or obj.ndim == 1:
return obj.numpy(), True
else:
return (
[
_cast_to_python_objects(
x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)[0]
for x in obj.numpy()
],
True,
)
elif config.JAX_AVAILABLE and "jax" in sys.modules and isinstance(obj, jnp.ndarray):
if obj.ndim == 0:
return np.asarray(obj)[()], True
elif not only_1d_for_numpy or obj.ndim == 1:
return np.asarray(obj), True
else:
return (
[
_cast_to_python_objects(
x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)[0]
for x in np.asarray(obj)
],
True,
)
elif config.PIL_AVAILABLE and "PIL" in sys.modules and isinstance(obj, PIL.Image.Image):
return encode_pil_image(obj), True
elif isinstance(obj, pd.Series):
return (
_cast_to_python_objects(
obj.tolist(), only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)[0],
True,
)
elif isinstance(obj, pd.DataFrame):
return (
{
key: _cast_to_python_objects(
value, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)[0]
for key, value in obj.to_dict("series").items()
},
True,
)
elif isinstance(obj, pd.Timestamp):
return obj.to_pydatetime(), True
elif isinstance(obj, pd.Timedelta):
return obj.to_pytimedelta(), True
elif isinstance(obj, Mapping):
has_changed = not isinstance(obj, dict)
output = {}
for k, v in obj.items():
casted_v, has_changed_v = _cast_to_python_objects(
v, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)
has_changed |= has_changed_v
output[k] = casted_v
return output if has_changed else obj, has_changed
elif hasattr(obj, "__array__"):
return (
_cast_to_python_objects(
obj.__array__(), only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)[0],
True,
)
elif isinstance(obj, (list, tuple)):
if len(obj) > 0:
for first_elmt in obj:
if _check_non_null_non_empty_recursive(first_elmt):
break
casted_first_elmt, has_changed_first_elmt = _cast_to_python_objects(
first_elmt, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)
if has_changed_first_elmt or not optimize_list_casting:
return (
[
_cast_to_python_objects(
elmt, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)[0]
for elmt in obj
],
True,
)
else:
if isinstance(obj, (list, tuple)):
return obj, False
else:
return list(obj), True
else:
return obj, False
else:
return obj, False
def cast_to_python_objects(obj: Any, only_1d_for_numpy=False, optimize_list_casting=True) -> Any:
"""
Cast numpy/pytorch/tensorflow/pandas objects to python lists.
It works recursively.
If `optimize_list_casting` is True, to avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be casted.
If the first element needs to be casted, then all the elements of the list will be casted, otherwise they'll stay the same.
This trick makes it possible to cast objects that contain tokenizer outputs, for example, without iterating over every single token.
Args:
obj: the object (nested struct) to cast
only_1d_for_numpy (bool, default ``False``): whether to keep the full multi-dim tensors as multi-dim numpy arrays, or convert them to
nested lists of 1-dimensional numpy arrays. This can be useful to keep only 1-d arrays to instantiate Arrow arrays.
Indeed Arrow only supports converting 1-dimensional array values.
optimize_list_casting (bool, default ``True``): whether to optimize list casting by checking the first non-null element to see if it needs to be casted
and if it doesn't, not checking the rest of the list elements.
Returns:
casted_obj: the casted object
"""
return _cast_to_python_objects(
obj, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)[0]
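# A minimal illustrative sketch (hypothetical helper, not used by the library) of typical
# `cast_to_python_objects` behaviour: 1-d numpy arrays are kept as-is, multi-dim arrays are
# split into lists of 1-d arrays when only_1d_for_numpy=True, and pandas objects become
# plain dicts/lists.
def _example_cast_to_python_objects() -> dict:
    batch = {
        "scores": np.array([0.1, 0.2, 0.3]),  # kept as a 1-d numpy array
        "matrix": np.ones((2, 2)),  # becomes a list of 1-d arrays (only_1d_for_numpy=True)
        "frame": pd.DataFrame({"a": [1, 2]}),  # becomes {"a": [1, 2]}
    }
    return cast_to_python_objects(batch, only_1d_for_numpy=True)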
@dataclass
class Value:
"""
The `Value` dtypes are as follows:
- `null`
- `bool`
- `int8`
- `int16`
- `int32`
- `int64`
- `uint8`
- `uint16`
- `uint32`
- `uint64`
- `float16`
- `float32` (alias float)
- `float64` (alias double)
- `time32[(s|ms)]`
- `time64[(us|ns)]`
- `timestamp[(s|ms|us|ns)]`
- `timestamp[(s|ms|us|ns), tz=(tzstring)]`
- `date32`
- `date64`
- `duration[(s|ms|us|ns)]`
- `decimal128(precision, scale)`
- `decimal256(precision, scale)`
- `binary`
- `large_binary`
- `string`
- `large_string`
Example:
```py
>>> from datasets import Features
>>> features = Features({'stars': Value(dtype='int32')})
>>> features
{'stars': Value(dtype='int32', id=None)}
```
"""
dtype: str
id: Optional[str] = None
# Automatically constructed
pa_type: ClassVar[Any] = None
_type: str = field(default="Value", init=False, repr=False)
def __post_init__(self):
if self.dtype == "double": # fix inferred type
self.dtype = "float64"
if self.dtype == "float": # fix inferred type
self.dtype = "float32"
self.pa_type = string_to_arrow(self.dtype)
def __call__(self):
return self.pa_type
def encode_example(self, value):
if pa.types.is_boolean(self.pa_type):
return bool(value)
elif pa.types.is_integer(self.pa_type):
return int(value)
elif pa.types.is_floating(self.pa_type):
return float(value)
elif pa.types.is_string(self.pa_type):
return str(value)
else:
return value
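# A minimal illustrative sketch (hypothetical helper, not used by the library): `Value`
# normalizes the "float"/"double" aliases in `__post_init__`, `__call__` returns the
# pyarrow type, and `encode_example` coerces scalars to the matching Python type.
def _example_value_feature() -> tuple:
    v = Value("double")  # alias is normalized to "float64"
    return v.dtype, v(), v.encode_example("3.5")  # ("float64", pa.float64(), 3.5)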
class _ArrayXD:
def __post_init__(self):
self.shape = tuple(self.shape)
def __call__(self):
pa_type = globals()[self.__class__.__name__ + "ExtensionType"](self.shape, self.dtype)
return pa_type
def encode_example(self, value):
return value
@dataclass
class Array2D(_ArrayXD):
"""Create a two-dimensional array.
Args:
shape (`tuple`):
The size of each dimension.
dtype (`str`):
The value of the data type.
Example:
```py
>>> from datasets import Features
>>> features = Features({'x': Array2D(shape=(1, 3), dtype='int32')})
```
"""
shape: tuple
dtype: str
id: Optional[str] = None
# Automatically constructed
_type: str = field(default="Array2D", init=False, repr=False)
@dataclass
class Array3D(_ArrayXD):
"""Create a three-dimensional array.
Args:
shape (`tuple`):
The size of each dimension.
dtype (`str`):
The value of the data type.
Example:
```py
>>> from datasets import Features
>>> features = Features({'x': Array3D(shape=(1, 2, 3), dtype='int32')})
```
"""
shape: tuple
dtype: str
id: Optional[str] = None
# Automatically constructed
_type: str = field(default="Array3D", init=False, repr=False)
@dataclass
class Array4D(_ArrayXD):
"""Create a four-dimensional array.
Args:
shape (`tuple`):
The size of each dimension.
dtype (`str`):
The value of the data type.
Example:
```py
>>> from datasets import Features
>>> features = Features({'x': Array4D(shape=(1, 2, 2, 3), dtype='int32')})
```
"""
shape: tuple
dtype: str
id: Optional[str] = None
# Automatically constructed
_type: str = field(default="Array4D", init=False, repr=False)
@dataclass
class Array5D(_ArrayXD):
"""Create a five-dimensional array.
Args:
shape (`tuple`):
The size of each dimension.
dtype (`str`):
The value of the data type.
Example:
```py
>>> from datasets import Features
>>> features = Features({'x': Array5D(shape=(1, 2, 2, 3, 3), dtype='int32')})
```
"""
shape: tuple
dtype: str
id: Optional[str] = None
# Automatically constructed
_type: str = field(default="Array5D", init=False, repr=False)
class _ArrayXDExtensionType(pa.ExtensionType):
ndims: Optional[int] = None
def __init__(self, shape: tuple, dtype: str):
if self.ndims is None or self.ndims <= 1:
raise ValueError("You must instantiate an array type with a value for dim that is > 1")
if len(shape) != self.ndims:
raise ValueError(f"shape={shape} and ndims={self.ndims} don't match")
for dim in range(1, self.ndims):
if shape[dim] is None:
raise ValueError(f"Support only dynamic size on first dimension. Got: {shape}")
self.shape = tuple(shape)
self.value_type = dtype
self.storage_dtype = self._generate_dtype(self.value_type)
pa.ExtensionType.__init__(self, self.storage_dtype, f"{self.__class__.__module__}.{self.__class__.__name__}")
def __arrow_ext_serialize__(self):
return json.dumps((self.shape, self.value_type)).encode()
@classmethod
def __arrow_ext_deserialize__(cls, storage_type, serialized):
args = json.loads(serialized)
return cls(*args)
# This was added to pa.ExtensionType in pyarrow >= 13.0.0
def __reduce__(self):
return self.__arrow_ext_deserialize__, (self.storage_type, self.__arrow_ext_serialize__())
def __hash__(self):
return hash((self.__class__, self.shape, self.value_type))
def __arrow_ext_class__(self):
return ArrayExtensionArray
def _generate_dtype(self, dtype):
dtype = string_to_arrow(dtype)
for d in reversed(self.shape):
dtype = pa.list_(dtype)
# Don't specify the size of the list, since fixed length list arrays have issues
# being validated after slicing in pyarrow 0.17.1
return dtype
def to_pandas_dtype(self):
return PandasArrayExtensionDtype(self.value_type)
class Array2DExtensionType(_ArrayXDExtensionType):
ndims = 2
class Array3DExtensionType(_ArrayXDExtensionType):
ndims = 3
class Array4DExtensionType(_ArrayXDExtensionType):
ndims = 4
class Array5DExtensionType(_ArrayXDExtensionType):
ndims = 5
# Register the extension types for deserialization
pa.register_extension_type(Array2DExtensionType((1, 2), "int64"))
pa.register_extension_type(Array3DExtensionType((1, 2, 3), "int64"))
pa.register_extension_type(Array4DExtensionType((1, 2, 3, 4), "int64"))
pa.register_extension_type(Array5DExtensionType((1, 2, 3, 4, 5), "int64"))
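# A minimal illustrative sketch (hypothetical helper, not used by the library): the storage
# type generated by `_ArrayXDExtensionType._generate_dtype` is a plain nested list type,
# so ArrayXD extension arrays wrap ordinary ListArray storage.
def _example_array2d_storage() -> pa.DataType:
    ext_type = Array2DExtensionType(shape=(None, 3), dtype="float32")
    return ext_type.storage_dtype  # list<item: list<item: float>>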
def _is_zero_copy_only(pa_type: pa.DataType, unnest: bool = False) -> bool:
"""
When converting a pyarrow array to a numpy array, we must know whether this could be done in zero-copy or not.
This function returns the value of the ``zero_copy_only`` parameter to pass to ``.to_numpy()``, given the type of the pyarrow array.
# zero copy is available for all primitive types except booleans and temporal types (date, time, timestamp or duration)
# primitive types are types for which the physical representation in arrow and in numpy is the same
# https://github.com/wesm/arrow/blob/c07b9b48cf3e0bbbab493992a492ae47e5b04cad/python/pyarrow/types.pxi#L821
# see https://arrow.apache.org/docs/python/generated/pyarrow.Array.html#pyarrow.Array.to_numpy
# and https://issues.apache.org/jira/browse/ARROW-2871?jql=text%20~%20%22boolean%20to_numpy%22
"""
def _unnest_pa_type(pa_type: pa.DataType) -> pa.DataType:
if pa.types.is_list(pa_type):
return _unnest_pa_type(pa_type.value_type)
return pa_type
if unnest:
pa_type = _unnest_pa_type(pa_type)
return pa.types.is_primitive(pa_type) and not (pa.types.is_boolean(pa_type) or pa.types.is_temporal(pa_type))
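# A minimal illustrative sketch (hypothetical helper, not used by the library) of the
# zero-copy rules implemented above.
def _example_zero_copy_flags() -> tuple:
    return (
        _is_zero_copy_only(pa.int64()),  # True: fixed-width, non-boolean, non-temporal
        _is_zero_copy_only(pa.bool_()),  # False: booleans are bit-packed in Arrow
        _is_zero_copy_only(pa.timestamp("s")),  # False: temporal type
        _is_zero_copy_only(pa.list_(pa.float32()), unnest=True),  # True once unnested to float32
    )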
class ArrayExtensionArray(pa.ExtensionArray):
def __array__(self):
zero_copy_only = _is_zero_copy_only(self.storage.type, unnest=True)
return self.to_numpy(zero_copy_only=zero_copy_only)
def __getitem__(self, i):
return self.storage[i]
def to_numpy(self, zero_copy_only=True):
storage: pa.ListArray = self.storage
null_mask = storage.is_null().to_numpy(zero_copy_only=False)
if self.type.shape[0] is not None:
size = 1
null_indices = np.arange(len(storage))[null_mask] - np.arange(np.sum(null_mask))
for i in range(self.type.ndims):
size *= self.type.shape[i]
storage = storage.flatten()
numpy_arr = storage.to_numpy(zero_copy_only=zero_copy_only)
numpy_arr = numpy_arr.reshape(len(self) - len(null_indices), *self.type.shape)
if len(null_indices):
numpy_arr = np.insert(numpy_arr.astype(np.float64), null_indices, np.nan, axis=0)
else:
shape = self.type.shape
ndims = self.type.ndims
arrays = []
first_dim_offsets = np.array([off.as_py() for off in storage.offsets])
for i, is_null in enumerate(null_mask):
if is_null:
arrays.append(np.nan)
else:
storage_el = storage[i : i + 1]
first_dim = first_dim_offsets[i + 1] - first_dim_offsets[i]
# flatten storage
for _ in range(ndims):
storage_el = storage_el.flatten()
numpy_arr = storage_el.to_numpy(zero_copy_only=zero_copy_only)
arrays.append(numpy_arr.reshape(first_dim, *shape[1:]))
if len(np.unique(np.diff(first_dim_offsets))) > 1:
# ragged
numpy_arr = np.empty(len(arrays), dtype=object)
numpy_arr[:] = arrays
else:
numpy_arr = np.array(arrays)
return numpy_arr
def to_pylist(self):
zero_copy_only = _is_zero_copy_only(self.storage.type, unnest=True)
numpy_arr = self.to_numpy(zero_copy_only=zero_copy_only)
if self.type.shape[0] is None and numpy_arr.dtype == object:
return [arr.tolist() for arr in numpy_arr.tolist()]
else:
return numpy_arr.tolist()
class PandasArrayExtensionDtype(PandasExtensionDtype):
_metadata = "value_type"
def __init__(self, value_type: Union["PandasArrayExtensionDtype", np.dtype]):
self._value_type = value_type
def __from_arrow__(self, array: Union[pa.Array, pa.ChunkedArray]):
if isinstance(array, pa.ChunkedArray):
array = array.type.wrap_array(pa.concat_arrays([chunk.storage for chunk in array.chunks]))
zero_copy_only = _is_zero_copy_only(array.storage.type, unnest=True)
numpy_arr = array.to_numpy(zero_copy_only=zero_copy_only)
return PandasArrayExtensionArray(numpy_arr)
@classmethod
def construct_array_type(cls):
return PandasArrayExtensionArray
@property
def type(self) -> type:
return np.ndarray
@property
def kind(self) -> str:
return "O"
@property
def name(self) -> str:
return f"array[{self.value_type}]"
@property
def value_type(self) -> np.dtype:
return self._value_type
class PandasArrayExtensionArray(PandasExtensionArray):
def __init__(self, data: np.ndarray, copy: bool = False):
self._data = data if not copy else np.array(data)
self._dtype = PandasArrayExtensionDtype(data.dtype)
def __array__(self, dtype=None):
"""
Convert to NumPy Array.
Note that Pandas expects a 1D array when dtype is set to object.
But for other dtypes, the returned shape is the same as the one of ``data``.
More info about pandas 1D requirement for PandasExtensionArray here:
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.api.extensions.ExtensionArray.html#pandas.api.extensions.ExtensionArray
"""
if dtype == object:
out = np.empty(len(self._data), dtype=object)
for i in range(len(self._data)):
out[i] = self._data[i]
return out
if dtype is None:
return self._data
else:
return self._data.astype(dtype)
def copy(self, deep: bool = False) -> "PandasArrayExtensionArray":
return PandasArrayExtensionArray(self._data, copy=True)
@classmethod
def _from_sequence(
cls, scalars, dtype: Optional[PandasArrayExtensionDtype] = None, copy: bool = False
) -> "PandasArrayExtensionArray":
if len(scalars) > 1 and all(
isinstance(x, np.ndarray) and x.shape == scalars[0].shape and x.dtype == scalars[0].dtype for x in scalars
):
data = np.array(scalars, dtype=dtype if dtype is None else dtype.value_type, copy=copy)
else:
data = np.empty(len(scalars), dtype=object)
data[:] = scalars
return cls(data, copy=copy)
@classmethod
def _concat_same_type(cls, to_concat: Sequence_["PandasArrayExtensionArray"]) -> "PandasArrayExtensionArray":
if len(to_concat) > 1 and all(
va._data.shape == to_concat[0]._data.shape and va._data.dtype == to_concat[0]._data.dtype
for va in to_concat
):
data = np.vstack([va._data for va in to_concat])
else:
data = np.empty(len(to_concat), dtype=object)
data[:] = [va._data for va in to_concat]
return cls(data, copy=False)
@property
def dtype(self) -> PandasArrayExtensionDtype:
return self._dtype
@property
def nbytes(self) -> int:
return self._data.nbytes
def isna(self) -> np.ndarray:
return np.array([pd.isna(arr).any() for arr in self._data])
def __setitem__(self, key: Union[int, slice, np.ndarray], value: Any) -> None:
raise NotImplementedError()
def __getitem__(self, item: Union[int, slice, np.ndarray]) -> Union[np.ndarray, "PandasArrayExtensionArray"]:
if isinstance(item, int):
return self._data[item]
return PandasArrayExtensionArray(self._data[item], copy=False)
def take(
self, indices: Sequence_[int], allow_fill: bool = False, fill_value: bool = None
) -> "PandasArrayExtensionArray":
indices: np.ndarray = np.asarray(indices, dtype=int)
if allow_fill:
fill_value = (
self.dtype.na_value if fill_value is None else np.asarray(fill_value, dtype=self.dtype.value_type)
)
mask = indices == -1
if (indices < -1).any():
raise ValueError("Invalid value in `indices`, must be all >= -1 for `allow_fill` is True")
elif len(self) > 0:
pass
elif not np.all(mask):
raise IndexError("Invalid take for empty PandasArrayExtensionArray, must be all -1.")
else:
data = np.array([fill_value] * len(indices), dtype=self.dtype.value_type)
return PandasArrayExtensionArray(data, copy=False)
took = self._data.take(indices, axis=0)
if allow_fill and mask.any():
took[mask] = [fill_value] * np.sum(mask)
return PandasArrayExtensionArray(took, copy=False)
def __len__(self) -> int:
return len(self._data)
def __eq__(self, other) -> np.ndarray:
if not isinstance(other, PandasArrayExtensionArray):
raise NotImplementedError(f"Invalid type to compare to: {type(other)}")
return (self._data == other._data).all()
def pandas_types_mapper(dtype):
if isinstance(dtype, _ArrayXDExtensionType):
return PandasArrayExtensionDtype(dtype.value_type)
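# A minimal illustrative sketch (hypothetical helper, not used by the library):
# `pandas_types_mapper` is meant to be passed as the `types_mapper` argument of
# `pyarrow.Table.to_pandas`, so columns stored with an ArrayXD extension type come back
# as PandasArrayExtensionArray columns instead of plain object columns.
def _example_types_mapper(table: pa.Table) -> pd.DataFrame:
    return table.to_pandas(types_mapper=pandas_types_mapper)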
@dataclass
class ClassLabel:
"""Feature type for integer class labels.
There are 3 ways to define a `ClassLabel`, which correspond to the 3 arguments:
* `num_classes`: Create 0 to (num_classes-1) labels.
* `names`: List of label strings.
* `names_file`: File containing the list of labels.
Under the hood the labels are stored as integers.
You can use negative integers to represent unknown/missing labels.
Args:
num_classes (`int`, *optional*):
Number of classes. All labels must be < `num_classes`.
names (`list` of `str`, *optional*):
String names for the integer classes.
The order in which the names are provided is kept.
names_file (`str`, *optional*):
Path to a file with names for the integer classes, one per line.
Example:
```py
>>> from datasets import Features
>>> features = Features({'label': ClassLabel(num_classes=3, names=['bad', 'ok', 'good'])})
>>> features
{'label': ClassLabel(num_classes=3, names=['bad', 'ok', 'good'], id=None)}
```
"""
num_classes: InitVar[Optional[int]] = None # Pseudo-field: ignored by asdict/fields when converting to/from dict
names: List[str] = None
names_file: InitVar[Optional[str]] = None # Pseudo-field: ignored by asdict/fields when converting to/from dict
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "int64"
pa_type: ClassVar[Any] = pa.int64()
_str2int: ClassVar[Dict[str, int]] = None
_int2str: ClassVar[List[str]] = None
_type: str = field(default="ClassLabel", init=False, repr=False)
def __post_init__(self, num_classes, names_file):
self.num_classes = num_classes
self.names_file = names_file
if self.names_file is not None and self.names is not None:
raise ValueError("Please provide either names or names_file but not both.")
# Set self.names
if self.names is None:
if self.names_file is not None:
self.names = self._load_names_from_file(self.names_file)
elif self.num_classes is not None:
self.names = [str(i) for i in range(self.num_classes)]
else:
raise ValueError("Please provide either num_classes, names or names_file.")
elif not isinstance(self.names, SequenceABC):
raise TypeError(f"Please provide names as a list, is {type(self.names)}")
# Set self.num_classes
if self.num_classes is None:
self.num_classes = len(self.names)
elif self.num_classes != len(self.names):
raise ValueError(
"ClassLabel number of names do not match the defined num_classes. "
f"Got {len(self.names)} names VS {self.num_classes} num_classes"
)
# Prepare mappings
self._int2str = [str(name) for name in self.names]
self._str2int = {name: i for i, name in enumerate(self._int2str)}
if len(self._int2str) != len(self._str2int):
raise ValueError("Some label names are duplicated. Each label name should be unique.")
def __call__(self):
return self.pa_type
def str2int(self, values: Union[str, Iterable]) -> Union[int, Iterable]:
"""Conversion class name `string` => `integer`.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train")
>>> ds.features["label"].str2int('neg')
0
```
"""
if not isinstance(values, str) and not isinstance(values, Iterable):
raise ValueError(
f"Values {values} should be a string or an Iterable (list, numpy array, pytorch, tensorflow tensors)"
)
return_list = True
if isinstance(values, str):
values = [values]
return_list = False
output = [self._strval2int(value) for value in values]
return output if return_list else output[0]
def _strval2int(self, value: str) -> int:
failed_parse = False
value = str(value)
# first attempt - raw string value
int_value = self._str2int.get(value)
if int_value is None:
# second attempt - strip whitespace
int_value = self._str2int.get(value.strip())
if int_value is None:
# third attempt - convert str to int
try:
int_value = int(value)
except ValueError:
failed_parse = True
else:
if int_value < -1 or int_value >= self.num_classes:
failed_parse = True
if failed_parse:
raise ValueError(f"Invalid string class label {value}")
return int_value
def int2str(self, values: Union[int, Iterable]) -> Union[str, Iterable]:
"""Conversion `integer` => class name `string`.
Regarding unknown/missing labels: passing negative integers raises `ValueError`.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train")
>>> ds.features["label"].int2str(0)
'neg'
```
"""
if not isinstance(values, int) and not isinstance(values, Iterable):
raise ValueError(
f"Values {values} should be an integer or an Iterable (list, numpy array, pytorch, tensorflow tensors)"
)
return_list = True
if isinstance(values, int):
values = [values]
return_list = False
for v in values:
if not 0 <= v < self.num_classes:
raise ValueError(f"Invalid integer class label {v:d}")
output = [self._int2str[int(v)] for v in values]
return output if return_list else output[0]
def encode_example(self, example_data):
if self.num_classes is None:
raise ValueError(
"Trying to use ClassLabel feature with undefined number of class. "
"Please set ClassLabel.names or num_classes."
)
# If a string is given, convert to associated integer
if isinstance(example_data, str):
example_data = self.str2int(example_data)
# Allowing -1 to mean no label.
if not -1 <= example_data < self.num_classes:
raise ValueError(f"Class label {example_data:d} greater than configured num_classes {self.num_classes}")
return example_data
def cast_storage(self, storage: Union[pa.StringArray, pa.IntegerArray]) -> pa.Int64Array:
"""Cast an Arrow array to the `ClassLabel` arrow storage type.
The Arrow types that can be converted to the `ClassLabel` pyarrow storage type are:
- `pa.string()`
- `pa.int()`
Args:
storage (`Union[pa.StringArray, pa.IntegerArray]`):
PyArrow array to cast.
Returns:
`pa.Int64Array`: Array in the `ClassLabel` arrow storage type.
"""
if isinstance(storage, pa.IntegerArray) and len(storage) > 0:
min_max = pc.min_max(storage).as_py()
if min_max["max"] is not None and min_max["max"] >= self.num_classes:
raise ValueError(
f"Class label {min_max['max']} greater than configured num_classes {self.num_classes}"
)
elif isinstance(storage, pa.StringArray):
storage = pa.array(
[self._strval2int(label) if label is not None else None for label in storage.to_pylist()]
)
return array_cast(storage, self.pa_type)
@staticmethod
def _load_names_from_file(names_filepath):
with open(names_filepath, encoding="utf-8") as f:
return [name.strip() for name in f.read().split("\n") if name.strip()] # Filter empty names
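# A minimal illustrative sketch (hypothetical helper, not used by the library):
# `ClassLabel.cast_storage` maps a pyarrow string column to the int64 storage type using
# the same name -> id lookup as `str2int`.
def _example_classlabel_cast_storage() -> list:
    labels = ClassLabel(names=["neg", "pos"])
    storage = pa.array(["pos", "neg", None])
    return labels.cast_storage(storage).to_pylist()  # [1, 0, None]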
@dataclass
class Sequence:
"""Construct a list of feature from a single type or a dict of types.
Mostly here for compatibility with tfds.
Args:
feature:
A list of features of a single type or a dictionary of types.
length (`int`):
Length of the sequence.
Example:
```py
>>> from datasets import Features, Sequence, Value, ClassLabel
>>> features = Features({'post': Sequence(feature={'text': Value(dtype='string'), 'upvotes': Value(dtype='int32'), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'])})})
>>> features
{'post': Sequence(feature={'text': Value(dtype='string', id=None), 'upvotes': Value(dtype='int32', id=None), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'], id=None)}, length=-1, id=None)}
```
"""
feature: Any
length: int = -1
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "list"
pa_type: ClassVar[Any] = None
_type: str = field(default="Sequence", init=False, repr=False)
FeatureType = Union[
dict,
list,
tuple,
Value,
ClassLabel,
Translation,
TranslationVariableLanguages,
Sequence,
Array2D,
Array3D,
Array4D,
Array5D,
Audio,
Image,
]
def _check_non_null_non_empty_recursive(obj, schema: Optional[FeatureType] = None) -> bool:
"""
Check if the object is not None.
If the object is a list or a tuple, recursively check the first element of the sequence and stop if at any point the first element is not a sequence or is an empty sequence.
"""
if obj is None:
return False
elif isinstance(obj, (list, tuple)) and (schema is None or isinstance(schema, (list, tuple, Sequence))):
if len(obj) > 0:
if schema is None:
pass
elif isinstance(schema, (list, tuple)):
schema = schema[0]
else:
schema = schema.feature
return _check_non_null_non_empty_recursive(obj[0], schema)
else:
return False
else:
return True
def get_nested_type(schema: FeatureType) -> pa.DataType:
"""
get_nested_type() converts a datasets.FeatureType into a pyarrow.DataType, and acts as the inverse of
generate_from_arrow_type().
It performs double-duty as the implementation of Features.type and handles the conversion of
datasets.Feature->pa.struct
"""
# Nested structures: we allow dict, list/tuples, sequences
if isinstance(schema, Features):
return pa.struct(
{key: get_nested_type(schema[key]) for key in schema}
) # Features is subclass of dict, and dict order is deterministic since Python 3.6
elif isinstance(schema, dict):
return pa.struct(
{key: get_nested_type(schema[key]) for key in schema}
) # however don't sort on struct types since the order matters
elif isinstance(schema, (list, tuple)):
if len(schema) != 1:
raise ValueError("When defining list feature, you should just provide one example of the inner type")
value_type = get_nested_type(schema[0])
return pa.list_(value_type)
elif isinstance(schema, Sequence):
value_type = get_nested_type(schema.feature)
# We allow reversing a list of dict into a dict of list for compatibility with tfds
if isinstance(schema.feature, dict):
return pa.struct({f.name: pa.list_(f.type, schema.length) for f in value_type})
return pa.list_(value_type, schema.length)
# Other objects are callable which returns their data type (ClassLabel, Array2D, Translation, Arrow datatype creation methods)
return schema()
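# A minimal illustrative sketch (hypothetical helper, not used by the library) of how
# plain dicts, single-element lists and `Sequence` objects are mapped to pyarrow types.
def _example_get_nested_type() -> pa.DataType:
    schema = {
        "text": Value("string"),  # -> string
        "token_ids": [Value("int32")],  # -> list<item: int32>
        "answers": Sequence({"start": Value("int32")}),  # dict in Sequence -> struct of lists
    }
    # struct<text: string, token_ids: list<item: int32>, answers: struct<start: list<item: int32>>>
    return get_nested_type(schema)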
def encode_nested_example(schema, obj, level=0):
"""Encode a nested example.
This is used since some features (in particular ClassLabel) have some logic during encoding.
To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be encoded.
If the first element needs to be encoded, then all the elements of the list will be encoded, otherwise they'll stay the same.
"""
# Nested structures: we allow dict, list/tuples, sequences
if isinstance(schema, dict):
if level == 0 and obj is None:
raise ValueError("Got None but expected a dictionary instead")
return (
{k: encode_nested_example(schema[k], obj.get(k), level=level + 1) for k in schema}
if obj is not None
else None
)
elif isinstance(schema, (list, tuple)):
sub_schema = schema[0]
if obj is None:
return None
else:
if len(obj) > 0:
for first_elmt in obj:
if _check_non_null_non_empty_recursive(first_elmt, sub_schema):
break
if encode_nested_example(sub_schema, first_elmt, level=level + 1) != first_elmt:
return [encode_nested_example(sub_schema, o, level=level + 1) for o in obj]
return list(obj)
elif isinstance(schema, Sequence):
if obj is None:
return None
# We allow reversing a list of dict into a dict of list for compatibility with tfds
if isinstance(schema.feature, dict):
# dict of list to fill
list_dict = {}
if isinstance(obj, (list, tuple)):
# obj is a list of dict
for k in schema.feature:
list_dict[k] = [encode_nested_example(schema.feature[k], o.get(k), level=level + 1) for o in obj]
return list_dict
else:
# obj is a single dict
for k in schema.feature:
list_dict[k] = (
[encode_nested_example(schema.feature[k], o, level=level + 1) for o in obj[k]]
if k in obj
else None
)
return list_dict
# schema.feature is not a dict
if isinstance(obj, str): # don't interpret a string as a list
raise ValueError(f"Got a string but expected a list instead: '{obj}'")
else:
if len(obj) > 0:
for first_elmt in obj:
if _check_non_null_non_empty_recursive(first_elmt, schema.feature):
break
# be careful when comparing tensors here
if (
not isinstance(first_elmt, list)
or encode_nested_example(schema.feature, first_elmt, level=level + 1) != first_elmt
):
return [encode_nested_example(schema.feature, o, level=level + 1) for o in obj]
return list(obj)
# Object with special encoding:
# ClassLabel will convert from string to int, TranslationVariableLanguages does some checks
elif isinstance(schema, (Audio, Image, ClassLabel, TranslationVariableLanguages, Value, _ArrayXD)):
return schema.encode_example(obj) if obj is not None else None
# Other objects should be directly convertible to a native Arrow type (like Translation)
return obj
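# A minimal illustrative sketch (hypothetical helper, not used by the library):
# `encode_nested_example` pushes per-feature encoding (here the ClassLabel string -> int
# conversion) through nested dicts and sequences.
def _example_encode_nested_example() -> dict:
    schema = {"labels": Sequence(ClassLabel(names=["neg", "pos"]))}
    return encode_nested_example(schema, {"labels": ["pos", "neg", "pos"]})  # {"labels": [1, 0, 1]}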
def decode_nested_example(schema, obj, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None):
"""Decode a nested example.
This is used since some features (in particular Audio and Image) have some logic during decoding.
To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be decoded.
If the first element needs to be decoded, then all the elements of the list will be decoded, otherwise they'll stay the same.
"""
# Nested structures: we allow dict, list/tuples, sequences
if isinstance(schema, dict):
return (
{k: decode_nested_example(sub_schema, sub_obj) for k, (sub_schema, sub_obj) in zip_dict(schema, obj)}
if obj is not None
else None
)
elif isinstance(schema, (list, tuple)):
sub_schema = schema[0]
if obj is None:
return None
else:
if len(obj) > 0:
for first_elmt in obj:
if _check_non_null_non_empty_recursive(first_elmt, sub_schema):
break
if decode_nested_example(sub_schema, first_elmt) != first_elmt:
return [decode_nested_example(sub_schema, o) for o in obj]
return list(obj)
elif isinstance(schema, Sequence):
# We allow reversing a list of dict into a dict of list for compatibility with tfds
if isinstance(schema.feature, dict):
return {k: decode_nested_example([schema.feature[k]], obj[k]) for k in schema.feature}
else:
return decode_nested_example([schema.feature], obj)
# Object with special decoding:
elif isinstance(schema, (Audio, Image)):
# we pass the token to read and decode files from private repositories in streaming mode
if obj is not None and schema.decode:
return schema.decode_example(obj, token_per_repo_id=token_per_repo_id)
return obj
_FEATURE_TYPES: Dict[str, FeatureType] = {
Value.__name__: Value,
ClassLabel.__name__: ClassLabel,
Translation.__name__: Translation,
TranslationVariableLanguages.__name__: TranslationVariableLanguages,
Sequence.__name__: Sequence,
Array2D.__name__: Array2D,
Array3D.__name__: Array3D,
Array4D.__name__: Array4D,
Array5D.__name__: Array5D,
Audio.__name__: Audio,
Image.__name__: Image,
}
@experimental
def register_feature(
feature_cls: type,
feature_type: str,
):
"""
Register a Feature object using a name and class.
This function must be used on a Feature class.
"""
if feature_type in _FEATURE_TYPES:
logger.warning(
f"Overwriting feature type '{feature_type}' ({_FEATURE_TYPES[feature_type].__name__} -> {feature_cls.__name__})"
)
_FEATURE_TYPES[feature_type] = feature_cls
def generate_from_dict(obj: Any):
"""Regenerate the nested feature object from a deserialized dict.
We use the '_type' fields to get the dataclass name to load.
generate_from_dict is the recursive helper for Features.from_dict, and allows for a convenient constructor syntax
to define features from deserialized JSON dictionaries. This function is used in particular when deserializing
a :class:`DatasetInfo` that was dumped to a JSON object. This acts as an analogue to
:meth:`Features.from_arrow_schema` and handles the recursive field-by-field instantiation, but doesn't require any
mapping to/from pyarrow, except for the fact that it takes advantage of the mapping of pyarrow primitive dtypes
that :class:`Value` automatically performs.
"""
# Nested structures: we allow dict, list/tuples, sequences
if isinstance(obj, list):
return [generate_from_dict(value) for value in obj]
# Otherwise we have a dict or a dataclass
if "_type" not in obj or isinstance(obj["_type"], dict):
return {key: generate_from_dict(value) for key, value in obj.items()}
obj = dict(obj)
_type = obj.pop("_type")
class_type = _FEATURE_TYPES.get(_type, None) or globals().get(_type, None)
if class_type is None:
raise ValueError(f"Feature type '{_type}' not found. Available feature types: {list(_FEATURE_TYPES.keys())}")
if class_type == Sequence:
return Sequence(feature=generate_from_dict(obj["feature"]), length=obj.get("length", -1))
field_names = {f.name for f in fields(class_type)}
return class_type(**{k: v for k, v in obj.items() if k in field_names})
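# A minimal illustrative sketch (hypothetical helper, not used by the library):
# `generate_from_dict` rebuilds feature objects from their serialized dict form, using the
# "_type" field to pick the class.
def _example_generate_from_dict() -> dict:
    serialized = {
        "text": {"dtype": "string", "_type": "Value"},
        "label": {"names": ["neg", "pos"], "_type": "ClassLabel"},
        "token_ids": {"feature": {"dtype": "int32", "_type": "Value"}, "length": -1, "_type": "Sequence"},
    }
    # -> {"text": Value("string"), "label": ClassLabel(names=["neg", "pos"]), "token_ids": Sequence(Value("int32"))}
    return generate_from_dict(serialized)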
def generate_from_arrow_type(pa_type: pa.DataType) -> FeatureType:
"""
generate_from_arrow_type accepts an arrow DataType and returns a datasets FeatureType to be used as the type for
a single field.
This is the high-level arrow->datasets type conversion and is inverted by get_nested_type().
This operates at the individual *field* level, whereas Features.from_arrow_schema() operates at the
full schema level and holds the methods that represent the bijection from Features<->pyarrow.Schema
"""
if isinstance(pa_type, pa.StructType):
return {field.name: generate_from_arrow_type(field.type) for field in pa_type}
elif isinstance(pa_type, pa.FixedSizeListType):
return Sequence(feature=generate_from_arrow_type(pa_type.value_type), length=pa_type.list_size)
elif isinstance(pa_type, pa.ListType):
feature = generate_from_arrow_type(pa_type.value_type)
if isinstance(feature, (dict, tuple, list)):
return [feature]
return Sequence(feature=feature)
elif isinstance(pa_type, _ArrayXDExtensionType):
array_feature = [None, None, Array2D, Array3D, Array4D, Array5D][pa_type.ndims]
return array_feature(shape=pa_type.shape, dtype=pa_type.value_type)
elif isinstance(pa_type, pa.DictionaryType):
raise NotImplementedError # TODO(thom) this will need access to the dictionary as well (for labels). I.e. to the py_table
elif isinstance(pa_type, pa.DataType):
return Value(dtype=_arrow_to_datasets_dtype(pa_type))
else:
raise ValueError(f"Cannot convert {pa_type} to a Feature type.")
def numpy_to_pyarrow_listarray(arr: np.ndarray, type: pa.DataType = None) -> pa.ListArray:
"""Build a PyArrow ListArray from a multidimensional NumPy array"""
arr = np.array(arr)
values = pa.array(arr.flatten(), type=type)
for i in range(arr.ndim - 1):
n_offsets = reduce(mul, arr.shape[: arr.ndim - i - 1], 1)
step_offsets = arr.shape[arr.ndim - i - 1]
offsets = pa.array(np.arange(n_offsets + 1) * step_offsets, type=pa.int32())
values = pa.ListArray.from_arrays(offsets, values)
return values
def list_of_pa_arrays_to_pyarrow_listarray(l_arr: List[Optional[pa.Array]]) -> pa.ListArray:
null_mask = np.array([arr is None for arr in l_arr])
null_indices = np.arange(len(null_mask))[null_mask] - np.arange(np.sum(null_mask))
l_arr = [arr for arr in l_arr if arr is not None]
offsets = np.cumsum(
[0] + [len(arr) for arr in l_arr], dtype=object
) # convert to dtype object to allow None insertion
offsets = np.insert(offsets, null_indices, None)
offsets = pa.array(offsets, type=pa.int32())
values = pa.concat_arrays(l_arr)
return pa.ListArray.from_arrays(offsets, values)
def list_of_np_array_to_pyarrow_listarray(l_arr: List[np.ndarray], type: pa.DataType = None) -> pa.ListArray:
"""Build a PyArrow ListArray from a possibly nested list of NumPy arrays"""
if len(l_arr) > 0:
return list_of_pa_arrays_to_pyarrow_listarray(
[numpy_to_pyarrow_listarray(arr, type=type) if arr is not None else None for arr in l_arr]
)
else:
return pa.array([], type=type)
def contains_any_np_array(data: Any):
"""Return `True` if data is a NumPy ndarray or (recursively) if first non-null value in list is a NumPy ndarray.
Args:
data (Any): Data.
Returns:
bool
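    Example::
        >>> contains_any_np_array([np.array([1, 2]), None])
        True
        >>> contains_any_np_array([[1, 2], [3]])
        False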
"""
if isinstance(data, np.ndarray):
return True
elif isinstance(data, list):
return contains_any_np_array(first_non_null_value(data)[1])
else:
return False
def any_np_array_to_pyarrow_listarray(data: Union[np.ndarray, List], type: pa.DataType = None) -> pa.ListArray:
"""Convert to PyArrow ListArray either a NumPy ndarray or (recursively) a list that may contain any NumPy ndarray.
Args:
data (Union[np.ndarray, List]): Data.
type (pa.DataType): Explicit PyArrow DataType passed to coerce the ListArray data type.
Returns:
pa.ListArray
"""
if isinstance(data, np.ndarray):
return numpy_to_pyarrow_listarray(data, type=type)
elif isinstance(data, list):
return list_of_pa_arrays_to_pyarrow_listarray([any_np_array_to_pyarrow_listarray(i, type=type) for i in data])
def to_pyarrow_listarray(data: Any, pa_type: _ArrayXDExtensionType) -> pa.Array:
"""Convert to PyArrow ListArray.
Args:
data (Any): Sequence, iterable, np.ndarray or pd.Series.
pa_type (_ArrayXDExtensionType): Any of the ArrayNDExtensionType.
Returns:
pyarrow.Array
"""
if contains_any_np_array(data):
return any_np_array_to_pyarrow_listarray(data, type=pa_type.value_type)
else:
return pa.array(data, pa_type.storage_dtype)
def _visit(feature: FeatureType, func: Callable[[FeatureType], Optional[FeatureType]]) -> FeatureType:
"""Visit a (possibly nested) feature.
Args:
        feature (FeatureType): the feature type to be visited
        func (Callable[[FeatureType], Optional[FeatureType]]): the function applied to each nested feature;
            if it returns ``None`` for a feature, that feature is kept unchanged
Returns:
visited feature (FeatureType)
"""
if isinstance(feature, dict):
out = func({k: _visit(f, func) for k, f in feature.items()})
elif isinstance(feature, (list, tuple)):
out = func([_visit(feature[0], func)])
elif isinstance(feature, Sequence):
out = func(Sequence(_visit(feature.feature, func), length=feature.length))
else:
out = func(feature)
return feature if out is None else out
def require_decoding(feature: FeatureType, ignore_decode_attribute: bool = False) -> bool:
"""Check if a (possibly nested) feature requires decoding.
Args:
feature (FeatureType): the feature type to be checked
ignore_decode_attribute (:obj:`bool`, default ``False``): Whether to ignore the current value
of the `decode` attribute of the decodable feature types.
Returns:
:obj:`bool`
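    Example::
        >>> require_decoding(Image())
        True
        >>> require_decoding(Value("string"))
        False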
"""
if isinstance(feature, dict):
return any(require_decoding(f) for f in feature.values())
elif isinstance(feature, (list, tuple)):
return require_decoding(feature[0])
elif isinstance(feature, Sequence):
return require_decoding(feature.feature)
else:
return hasattr(feature, "decode_example") and (feature.decode if not ignore_decode_attribute else True)
def require_storage_cast(feature: FeatureType) -> bool:
"""Check if a (possibly nested) feature requires storage casting.
Args:
feature (FeatureType): the feature type to be checked
Returns:
:obj:`bool`
"""
if isinstance(feature, dict):
return any(require_storage_cast(f) for f in feature.values())
elif isinstance(feature, (list, tuple)):
return require_storage_cast(feature[0])
elif isinstance(feature, Sequence):
return require_storage_cast(feature.feature)
else:
return hasattr(feature, "cast_storage")
def require_storage_embed(feature: FeatureType) -> bool:
"""Check if a (possibly nested) feature requires embedding data into storage.
Args:
feature (FeatureType): the feature type to be checked
Returns:
:obj:`bool`
"""
if isinstance(feature, dict):
return any(require_storage_cast(f) for f in feature.values())
elif isinstance(feature, (list, tuple)):
return require_storage_cast(feature[0])
elif isinstance(feature, Sequence):
return require_storage_cast(feature.feature)
else:
return hasattr(feature, "embed_storage")
def keep_features_dicts_synced(func):
"""
    Wrapper to keep the secondary dictionary of the :class:`datasets.Features` object, which tracks whether each key is
    decodable, in sync with the main dictionary.
"""
@wraps(func)
def wrapper(*args, **kwargs):
if args:
self: "Features" = args[0]
args = args[1:]
else:
self: "Features" = kwargs.pop("self")
out = func(self, *args, **kwargs)
assert hasattr(self, "_column_requires_decoding")
self._column_requires_decoding = {col: require_decoding(feature) for col, feature in self.items()}
return out
wrapper._decorator_name_ = "_keep_dicts_synced"
return wrapper
class Features(dict):
"""A special dictionary that defines the internal structure of a dataset.
Instantiated with a dictionary of type `dict[str, FieldType]`, where keys are the desired column names,
and values are the type of that column.
`FieldType` can be one of the following:
- a [`~datasets.Value`] feature specifies a single typed value, e.g. `int64` or `string`.
- a [`~datasets.ClassLabel`] feature specifies a field with a predefined set of classes which can have labels
associated to them and will be stored as integers in the dataset.
    - a python `dict` which specifies that the field is a nested field containing a mapping of sub-field names to sub-field
    features. It's possible to have nested fields of nested fields in an arbitrary manner.
- a python `list` or a [`~datasets.Sequence`] specifies that the field contains a list of objects. The python
`list` or [`~datasets.Sequence`] should be provided with a single sub-feature as an example of the feature
type hosted in this list.
<Tip>
    A [`~datasets.Sequence`] with an internal dictionary feature will be automatically converted into a dictionary of
    lists. This behavior is implemented to provide a compatibility layer with the TensorFlow Datasets library but may be
    unwanted in some cases. If you don't want this behavior, you can use a python `list` instead of the
[`~datasets.Sequence`].
</Tip>
- a [`Array2D`], [`Array3D`], [`Array4D`] or [`Array5D`] feature for multidimensional arrays.
- an [`Audio`] feature to store the absolute path to an audio file or a dictionary with the relative path
to an audio file ("path" key) and its bytes content ("bytes" key). This feature extracts the audio data.
- an [`Image`] feature to store the absolute path to an image file, an `np.ndarray` object, a `PIL.Image.Image` object
or a dictionary with the relative path to an image file ("path" key) and its bytes content ("bytes" key). This feature extracts the image data.
- [`~datasets.Translation`] and [`~datasets.TranslationVariableLanguages`], the two features specific to Machine Translation.
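    Example:
    ```py
    >>> from datasets import Features, Value, ClassLabel, Sequence
    >>> features = Features(
    ...     {
    ...         "text": Value("string"),
    ...         "label": ClassLabel(names=["neg", "pos"]),
    ...         "tokens": Sequence(Value("string")),
    ...     }
    ... )
    ```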
"""
def __init__(*args, **kwargs):
# self not in the signature to allow passing self as a kwarg
if not args:
raise TypeError("descriptor '__init__' of 'Features' object needs an argument")
self, *args = args
super(Features, self).__init__(*args, **kwargs)
self._column_requires_decoding: Dict[str, bool] = {
col: require_decoding(feature) for col, feature in self.items()
}
__setitem__ = keep_features_dicts_synced(dict.__setitem__)
__delitem__ = keep_features_dicts_synced(dict.__delitem__)
update = keep_features_dicts_synced(dict.update)
setdefault = keep_features_dicts_synced(dict.setdefault)
pop = keep_features_dicts_synced(dict.pop)
popitem = keep_features_dicts_synced(dict.popitem)
clear = keep_features_dicts_synced(dict.clear)
def __reduce__(self):
return Features, (dict(self),)
@property
def type(self):
"""
Features field types.
Returns:
:obj:`pyarrow.DataType`
"""
return get_nested_type(self)
@property
def arrow_schema(self):
"""
Features schema.
Returns:
:obj:`pyarrow.Schema`
"""
hf_metadata = {"info": {"features": self.to_dict()}}
return pa.schema(self.type).with_metadata({"huggingface": json.dumps(hf_metadata)})
@classmethod
def from_arrow_schema(cls, pa_schema: pa.Schema) -> "Features":
"""
Construct [`Features`] from Arrow Schema.
It also checks the schema metadata for Hugging Face Datasets features.
        Non-nullable fields are not supported and are set to nullable.
Args:
pa_schema (`pyarrow.Schema`):
Arrow Schema.
Returns:
[`Features`]
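        Example:
        ```py
        >>> import pyarrow as pa
        >>> Features.from_arrow_schema(pa.schema([("text", pa.string()), ("id", pa.int64())]))
        {'text': Value(dtype='string', id=None), 'id': Value(dtype='int64', id=None)}
        ```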
"""
# try to load features from the arrow schema metadata
metadata_features = Features()
if pa_schema.metadata is not None and "huggingface".encode("utf-8") in pa_schema.metadata:
metadata = json.loads(pa_schema.metadata["huggingface".encode("utf-8")].decode())
if "info" in metadata and "features" in metadata["info"] and metadata["info"]["features"] is not None:
metadata_features = Features.from_dict(metadata["info"]["features"])
metadata_features_schema = metadata_features.arrow_schema
obj = {
field.name: (
metadata_features[field.name]
if field.name in metadata_features and metadata_features_schema.field(field.name) == field
else generate_from_arrow_type(field.type)
)
for field in pa_schema
}
return cls(**obj)
@classmethod
def from_dict(cls, dic) -> "Features":
"""
Construct [`Features`] from dict.
Regenerate the nested feature object from a deserialized dict.
We use the `_type` key to infer the dataclass name of the feature `FieldType`.
It allows for a convenient constructor syntax
to define features from deserialized JSON dictionaries. This function is used in particular when deserializing
a [`DatasetInfo`] that was dumped to a JSON object. This acts as an analogue to
[`Features.from_arrow_schema`] and handles the recursive field-by-field instantiation, but doesn't require
any mapping to/from pyarrow, except for the fact that it takes advantage of the mapping of pyarrow primitive
dtypes that [`Value`] automatically performs.
Args:
dic (`dict[str, Any]`):
Python dictionary.
Returns:
`Features`
Example::
>>> Features.from_dict({'_type': {'dtype': 'string', 'id': None, '_type': 'Value'}})
{'_type': Value(dtype='string', id=None)}
"""
obj = generate_from_dict(dic)
return cls(**obj)
def to_dict(self):
return asdict(self)
def _to_yaml_list(self) -> list:
# we compute the YAML list from the dict representation that is used for JSON dump
yaml_data = self.to_dict()
def simplify(feature: dict) -> dict:
if not isinstance(feature, dict):
raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}")
#
# sequence: -> sequence: int32
# dtype: int32 ->
#
if isinstance(feature.get("sequence"), dict) and list(feature["sequence"]) == ["dtype"]:
feature["sequence"] = feature["sequence"]["dtype"]
#
# sequence: -> sequence:
# struct: -> - name: foo
# - name: foo -> dtype: int32
# dtype: int32 ->
#
if isinstance(feature.get("sequence"), dict) and list(feature["sequence"]) == ["struct"]:
feature["sequence"] = feature["sequence"]["struct"]
#
# list: -> list: int32
# dtype: int32 ->
#
if isinstance(feature.get("list"), dict) and list(feature["list"]) == ["dtype"]:
feature["list"] = feature["list"]["dtype"]
#
# list: -> list:
# struct: -> - name: foo
# - name: foo -> dtype: int32
# dtype: int32 ->
#
if isinstance(feature.get("list"), dict) and list(feature["list"]) == ["struct"]:
feature["list"] = feature["list"]["struct"]
#
# class_label: -> class_label:
# names: -> names:
# - negative -> '0': negative
# - positive -> '1': positive
#
if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), list):
# server-side requirement: keys must be strings
feature["class_label"]["names"] = {
str(label_id): label_name for label_id, label_name in enumerate(feature["class_label"]["names"])
}
return feature
def to_yaml_inner(obj: Union[dict, list]) -> dict:
if isinstance(obj, dict):
_type = obj.pop("_type", None)
if _type == "Sequence":
_feature = obj.pop("feature")
return simplify({"sequence": to_yaml_inner(_feature), **obj})
elif _type == "Value":
return obj
elif _type and not obj:
return {"dtype": camelcase_to_snakecase(_type)}
elif _type:
return {"dtype": simplify({camelcase_to_snakecase(_type): obj})}
else:
return {"struct": [{"name": name, **to_yaml_inner(_feature)} for name, _feature in obj.items()]}
elif isinstance(obj, list):
return simplify({"list": simplify(to_yaml_inner(obj[0]))})
elif isinstance(obj, tuple):
return to_yaml_inner(list(obj))
else:
raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}")
def to_yaml_types(obj: dict) -> dict:
if isinstance(obj, dict):
return {k: to_yaml_types(v) for k, v in obj.items()}
elif isinstance(obj, list):
return [to_yaml_types(v) for v in obj]
elif isinstance(obj, tuple):
return to_yaml_types(list(obj))
else:
return obj
return to_yaml_types(to_yaml_inner(yaml_data)["struct"])
@classmethod
def _from_yaml_list(cls, yaml_data: list) -> "Features":
yaml_data = copy.deepcopy(yaml_data)
# we convert the list obtained from YAML data into the dict representation that is used for JSON dump
def unsimplify(feature: dict) -> dict:
if not isinstance(feature, dict):
raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}")
#
# sequence: int32 -> sequence:
# -> dtype: int32
#
if isinstance(feature.get("sequence"), str):
feature["sequence"] = {"dtype": feature["sequence"]}
#
# list: int32 -> list:
# -> dtype: int32
#
if isinstance(feature.get("list"), str):
feature["list"] = {"dtype": feature["list"]}
#
# class_label: -> class_label:
# names: -> names:
# '0': negative -> - negative
# '1': positive -> - positive
#
if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), dict):
label_ids = sorted(feature["class_label"]["names"], key=int)
if label_ids and [int(label_id) for label_id in label_ids] != list(range(int(label_ids[-1]) + 1)):
raise ValueError(
f"ClassLabel expected a value for all label ids [0:{int(label_ids[-1]) + 1}] but some ids are missing."
)
feature["class_label"]["names"] = [feature["class_label"]["names"][label_id] for label_id in label_ids]
return feature
def from_yaml_inner(obj: Union[dict, list]) -> Union[dict, list]:
if isinstance(obj, dict):
if not obj:
return {}
_type = next(iter(obj))
if _type == "sequence":
_feature = unsimplify(obj).pop(_type)
return {"feature": from_yaml_inner(_feature), **obj, "_type": "Sequence"}
if _type == "list":
return [from_yaml_inner(unsimplify(obj)[_type])]
if _type == "struct":
return from_yaml_inner(obj["struct"])
elif _type == "dtype":
if isinstance(obj["dtype"], str):
# e.g. int32, float64, string, audio, image
try:
Value(obj["dtype"])
return {**obj, "_type": "Value"}
except ValueError:
# e.g. Audio, Image, ArrayXD
return {"_type": snakecase_to_camelcase(obj["dtype"])}
else:
return from_yaml_inner(obj["dtype"])
else:
return {"_type": snakecase_to_camelcase(_type), **unsimplify(obj)[_type]}
elif isinstance(obj, list):
names = [_feature.pop("name") for _feature in obj]
return {name: from_yaml_inner(_feature) for name, _feature in zip(names, obj)}
else:
raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}")
return cls.from_dict(from_yaml_inner(yaml_data))
def encode_example(self, example):
"""
Encode example into a format for Arrow.
Args:
example (`dict[str, Any]`):
Data in a Dataset row.
Returns:
`dict[str, Any]`
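        Example:
        ```py
        >>> features = Features({"label": ClassLabel(names=["neg", "pos"]), "text": Value("string")})
        >>> features.encode_example({"label": "pos", "text": "great movie"})
        {'label': 1, 'text': 'great movie'}
        ```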
"""
example = cast_to_python_objects(example)
return encode_nested_example(self, example)
def encode_column(self, column, column_name: str):
"""
Encode column into a format for Arrow.
Args:
column (`list[Any]`):
Data in a Dataset column.
column_name (`str`):
Dataset column name.
Returns:
`list[Any]`
"""
column = cast_to_python_objects(column)
return [encode_nested_example(self[column_name], obj, level=1) for obj in column]
def encode_batch(self, batch):
"""
Encode batch into a format for Arrow.
Args:
batch (`dict[str, list[Any]]`):
Data in a Dataset batch.
Returns:
`dict[str, list[Any]]`
"""
encoded_batch = {}
if set(batch) != set(self):
raise ValueError(f"Column mismatch between batch {set(batch)} and features {set(self)}")
for key, column in batch.items():
column = cast_to_python_objects(column)
encoded_batch[key] = [encode_nested_example(self[key], obj, level=1) for obj in column]
return encoded_batch
def decode_example(self, example: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None):
"""Decode example with custom feature decoding.
Args:
example (`dict[str, Any]`):
Dataset row data.
token_per_repo_id (`dict`, *optional*):
To access and decode audio or image files from private repositories on the Hub, you can pass
a dictionary `repo_id (str) -> token (bool or str)`.
Returns:
`dict[str, Any]`
"""
return {
column_name: decode_nested_example(feature, value, token_per_repo_id=token_per_repo_id)
if self._column_requires_decoding[column_name]
else value
for column_name, (feature, value) in zip_dict(
{key: value for key, value in self.items() if key in example}, example
)
}
def decode_column(self, column: list, column_name: str):
"""Decode column with custom feature decoding.
Args:
column (`list[Any]`):
Dataset column data.
column_name (`str`):
Dataset column name.
Returns:
`list[Any]`
"""
return (
[decode_nested_example(self[column_name], value) if value is not None else None for value in column]
if self._column_requires_decoding[column_name]
else column
)
def decode_batch(self, batch: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None):
"""Decode batch with custom feature decoding.
Args:
batch (`dict[str, list[Any]]`):
Dataset batch data.
token_per_repo_id (`dict`, *optional*):
To access and decode audio or image files from private repositories on the Hub, you can pass
a dictionary repo_id (str) -> token (bool or str)
Returns:
`dict[str, list[Any]]`
"""
decoded_batch = {}
for column_name, column in batch.items():
decoded_batch[column_name] = (
[
decode_nested_example(self[column_name], value, token_per_repo_id=token_per_repo_id)
if value is not None
else None
for value in column
]
if self._column_requires_decoding[column_name]
else column
)
return decoded_batch
def copy(self) -> "Features":
"""
Make a deep copy of [`Features`].
Returns:
[`Features`]
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train")
>>> copy_of_features = ds.features.copy()
>>> copy_of_features
{'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
'text': Value(dtype='string', id=None)}
```
"""
return copy.deepcopy(self)
def reorder_fields_as(self, other: "Features") -> "Features":
"""
Reorder Features fields to match the field order of other [`Features`].
The order of the fields is important since it matters for the underlying arrow data.
        Re-ordering the fields allows the underlying arrow data types to match.
Args:
other ([`Features`]):
The other [`Features`] to align with.
Returns:
[`Features`]
Example::
>>> from datasets import Features, Sequence, Value
        >>> # let's say we have two features with a different order of nested fields (for a and b for example)
>>> f1 = Features({"root": Sequence({"a": Value("string"), "b": Value("string")})})
>>> f2 = Features({"root": {"b": Sequence(Value("string")), "a": Sequence(Value("string"))}})
>>> assert f1.type != f2.type
        >>> # re-ordering keeps the base structure (here Sequence is defined at the root level), but makes the field order match
>>> f1.reorder_fields_as(f2)
{'root': Sequence(feature={'b': Value(dtype='string', id=None), 'a': Value(dtype='string', id=None)}, length=-1, id=None)}
>>> assert f1.reorder_fields_as(f2).type == f2.type
"""
def recursive_reorder(source, target, stack=""):
stack_position = " at " + stack[1:] if stack else ""
if isinstance(target, Sequence):
target = target.feature
if isinstance(target, dict):
target = {k: [v] for k, v in target.items()}
else:
target = [target]
if isinstance(source, Sequence):
source, id_, length = source.feature, source.id, source.length
if isinstance(source, dict):
source = {k: [v] for k, v in source.items()}
reordered = recursive_reorder(source, target, stack)
return Sequence({k: v[0] for k, v in reordered.items()}, id=id_, length=length)
else:
source = [source]
reordered = recursive_reorder(source, target, stack)
return Sequence(reordered[0], id=id_, length=length)
elif isinstance(source, dict):
if not isinstance(target, dict):
raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position)
if sorted(source) != sorted(target):
message = (
f"Keys mismatch: between {source} (source) and {target} (target).\n"
f"{source.keys()-target.keys()} are missing from target "
f"and {target.keys()-source.keys()} are missing from source" + stack_position
)
raise ValueError(message)
return {key: recursive_reorder(source[key], target[key], stack + f".{key}") for key in target}
elif isinstance(source, list):
if not isinstance(target, list):
raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position)
if len(source) != len(target):
raise ValueError(f"Length mismatch: between {source} and {target}" + stack_position)
return [recursive_reorder(source[i], target[i], stack + ".<list>") for i in range(len(target))]
else:
return source
return Features(recursive_reorder(self, other))
def flatten(self, max_depth=16) -> "Features":
"""Flatten the features. Every dictionary column is removed and is replaced by
all the subfields it contains. The new fields are named by concatenating the
name of the original column and the subfield name like this: `<original>.<subfield>`.
If a column contains nested dictionaries, then all the lower-level subfields names are
also concatenated to form new columns: `<original>.<subfield>.<subsubfield>`, etc.
Returns:
[`Features`]:
The flattened features.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("squad", split="train")
>>> ds.features.flatten()
{'answers.answer_start': Sequence(feature=Value(dtype='int32', id=None), length=-1, id=None),
'answers.text': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None),
'context': Value(dtype='string', id=None),
'id': Value(dtype='string', id=None),
'question': Value(dtype='string', id=None),
'title': Value(dtype='string', id=None)}
```
"""
for depth in range(1, max_depth):
no_change = True
flattened = self.copy()
for column_name, subfeature in self.items():
if isinstance(subfeature, dict):
no_change = False
flattened.update({f"{column_name}.{k}": v for k, v in subfeature.items()})
del flattened[column_name]
elif isinstance(subfeature, Sequence) and isinstance(subfeature.feature, dict):
no_change = False
flattened.update(
{
f"{column_name}.{k}": Sequence(v) if not isinstance(v, dict) else [v]
for k, v in subfeature.feature.items()
}
)
del flattened[column_name]
elif hasattr(subfeature, "flatten") and subfeature.flatten() != subfeature:
no_change = False
flattened.update({f"{column_name}.{k}": v for k, v in subfeature.flatten().items()})
del flattened[column_name]
self = flattened
if no_change:
break
return self
def _align_features(features_list: List[Features]) -> List[Features]:
"""Align dictionaries of features so that the keys that are found in multiple dictionaries share the same feature."""
name2feature = {}
for features in features_list:
for k, v in features.items():
if k in name2feature and isinstance(v, dict):
# Recursively align features.
name2feature[k] = _align_features([name2feature[k], v])[0]
elif k not in name2feature or (isinstance(name2feature[k], Value) and name2feature[k].dtype == "null"):
name2feature[k] = v
return [Features({k: name2feature[k] for k in features.keys()}) for features in features_list]
def _check_if_features_can_be_aligned(features_list: List[Features]):
"""Check if the dictionaries of features can be aligned.
    Two dictionaries of features can be aligned if the keys they share have the same type, or if some of them are of type `Value("null")`.
"""
name2feature = {}
for features in features_list:
for k, v in features.items():
if k not in name2feature or (isinstance(name2feature[k], Value) and name2feature[k].dtype == "null"):
name2feature[k] = v
for features in features_list:
for k, v in features.items():
if isinstance(v, dict) and isinstance(name2feature[k], dict):
# Deep checks for structure.
_check_if_features_can_be_aligned([name2feature[k], v])
elif not (isinstance(v, Value) and v.dtype == "null") and name2feature[k] != v:
raise ValueError(
                    f'The features can\'t be aligned because the key {k} of features {features} has unexpected type - {v} (expected either {name2feature[k]} or Value("null")).'
)
| datasets/src/datasets/features/features.py/0 | {
"file_path": "datasets/src/datasets/features/features.py",
"repo_id": "datasets",
"token_count": 40149
} | 76 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ArrowConfig(datasets.BuilderConfig):
"""BuilderConfig for Arrow."""
features: Optional[datasets.Features] = None
class Arrow(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = ArrowConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""We handle string, list and dicts in datafiles"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
data_files = dl_manager.download_and_extract(self.config.data_files)
if isinstance(data_files, (str, list, tuple)):
files = data_files
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(files):
with open(file, "rb") as f:
self.info.features = datasets.Features.from_arrow_schema(pa.ipc.open_stream(f).schema)
break
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, self.info.features.arrow_schema)
return pa_table
def _generate_tables(self, files):
for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
with open(file, "rb") as f:
try:
for batch_idx, record_batch in enumerate(pa.ipc.open_stream(f)):
pa_table = pa.Table.from_batches([record_batch])
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
except ValueError as e:
logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
raise
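# Illustrative usage (not part of this module): the Arrow packaged builder is normally reached through
# `load_dataset`, with data files assumed to be Arrow IPC streams readable by `pa.ipc.open_stream`, e.g.:
#   from datasets import load_dataset
#   ds = load_dataset("arrow", data_files={"train": "path/to/train.arrow"}, split="train")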
| datasets/src/datasets/packaged_modules/arrow/arrow.py/0 | {
"file_path": "datasets/src/datasets/packaged_modules/arrow/arrow.py",
"repo_id": "datasets",
"token_count": 1473
} | 77 |
import itertools
import warnings
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
"""BuilderConfig for Pandas."""
features: Optional[datasets.Features] = None
class Pandas(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = PandasConfig
def _info(self):
warnings.warn(
"The Pandas builder is deprecated and will be removed in the next major version of datasets.",
FutureWarning,
)
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""We handle string, list and dicts in datafiles"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
data_files = dl_manager.download_and_extract(self.config.data_files)
if isinstance(data_files, (str, list, tuple)):
files = data_files
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, self.config.features.arrow_schema)
return pa_table
def _generate_tables(self, files):
for i, file in enumerate(itertools.chain.from_iterable(files)):
with open(file, "rb") as f:
pa_table = pa.Table.from_pandas(pd.read_pickle(f))
yield i, self._cast_table(pa_table)
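# Illustrative usage (not part of this module): this deprecated builder reads pickled `pandas.DataFrame`
# files via `pd.read_pickle`, e.g.:
#   from datasets import load_dataset
#   ds = load_dataset("pandas", data_files={"train": "path/to/train.pkl"}, split="train")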
| datasets/src/datasets/packaged_modules/pandas/pandas.py/0 | {
"file_path": "datasets/src/datasets/packaged_modules/pandas/pandas.py",
"repo_id": "datasets",
"token_count": 1011
} | 78 |
import importlib
import inspect
from functools import wraps
from typing import TYPE_CHECKING, Optional
from .download.download_config import DownloadConfig
from .download.streaming_download_manager import (
xbasename,
xdirname,
xet_parse,
xexists,
xgetsize,
xglob,
xgzip_open,
xisdir,
xisfile,
xjoin,
xlistdir,
xnumpy_load,
xopen,
xpandas_read_csv,
xpandas_read_excel,
xPath,
xpyarrow_parquet_read_table,
xrelpath,
xsio_loadmat,
xsplit,
xsplitext,
xwalk,
xxml_dom_minidom_parse,
)
from .utils.logging import get_logger
from .utils.patching import patch_submodule
from .utils.py_utils import get_imports, lock_importable_file
logger = get_logger(__name__)
if TYPE_CHECKING:
from .builder import DatasetBuilder
def extend_module_for_streaming(module_path, download_config: Optional[DownloadConfig] = None):
"""Extend the module to support streaming.
We patch some functions in the module to use `fsspec` to support data streaming:
- We use `fsspec.open` to open and read remote files. We patch the module function:
- `open`
- We use the "::" hop separator to join paths and navigate remote compressed/archive files. We patch the module
functions:
- `os.path.join`
- `pathlib.Path.joinpath` and `pathlib.Path.__truediv__` (called when using the "/" operator)
The patched functions are replaced with custom functions defined to work with the
:class:`~download.streaming_download_manager.StreamingDownloadManager`.
Args:
module_path: Path to the module to be extended.
        download_config: mainly used to pass `use_auth_token` or `storage_options` to support different platforms and auth types.
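    Example (illustrative; the module path below is a hypothetical user dataset script)::
        >>> extend_module_for_streaming("datasets_modules.datasets.my_dataset", download_config=DownloadConfig())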
"""
module = importlib.import_module(module_path)
# TODO(QL): always update the module to add subsequent new authentication without removing old ones
if hasattr(module, "_patched_for_streaming") and module._patched_for_streaming:
if isinstance(module._patched_for_streaming, DownloadConfig):
module._patched_for_streaming.token = download_config.token
module._patched_for_streaming.storage_options = download_config.storage_options
return
def wrap_auth(function):
@wraps(function)
def wrapper(*args, **kwargs):
return function(*args, download_config=download_config, **kwargs)
wrapper._decorator_name_ = "wrap_auth"
return wrapper
# open files in a streaming fashion
patch_submodule(module, "open", wrap_auth(xopen)).start()
patch_submodule(module, "os.listdir", wrap_auth(xlistdir)).start()
patch_submodule(module, "os.walk", wrap_auth(xwalk)).start()
patch_submodule(module, "glob.glob", wrap_auth(xglob)).start()
# allow to navigate in remote zip files
patch_submodule(module, "os.path.join", xjoin).start()
patch_submodule(module, "os.path.dirname", xdirname).start()
patch_submodule(module, "os.path.basename", xbasename).start()
patch_submodule(module, "os.path.relpath", xrelpath).start()
patch_submodule(module, "os.path.split", xsplit).start()
patch_submodule(module, "os.path.splitext", xsplitext).start()
# allow checks on paths
patch_submodule(module, "os.path.exists", wrap_auth(xexists)).start()
patch_submodule(module, "os.path.isdir", wrap_auth(xisdir)).start()
patch_submodule(module, "os.path.isfile", wrap_auth(xisfile)).start()
patch_submodule(module, "os.path.getsize", wrap_auth(xgetsize)).start()
patch_submodule(module, "pathlib.Path", xPath).start()
# file readers
patch_submodule(module, "gzip.open", wrap_auth(xgzip_open)).start()
patch_submodule(module, "numpy.load", wrap_auth(xnumpy_load)).start()
patch_submodule(module, "pandas.read_csv", wrap_auth(xpandas_read_csv), attrs=["__version__"]).start()
patch_submodule(module, "pandas.read_excel", wrap_auth(xpandas_read_excel), attrs=["__version__"]).start()
patch_submodule(module, "scipy.io.loadmat", wrap_auth(xsio_loadmat), attrs=["__version__"]).start()
patch_submodule(module, "xml.etree.ElementTree.parse", wrap_auth(xet_parse)).start()
patch_submodule(module, "xml.dom.minidom.parse", wrap_auth(xxml_dom_minidom_parse)).start()
# pyarrow: do not patch pyarrow attribute in packaged modules
if not module.__name__.startswith("datasets.packaged_modules."):
patch_submodule(module, "pyarrow.parquet.read_table", wrap_auth(xpyarrow_parquet_read_table)).start()
module._patched_for_streaming = download_config
def extend_dataset_builder_for_streaming(builder: "DatasetBuilder"):
"""Extend the dataset builder module and the modules imported by it to support streaming.
Args:
builder (:class:`DatasetBuilder`): Dataset builder instance.
"""
# this extends the open and os.path.join functions for data streaming
download_config = DownloadConfig(storage_options=builder.storage_options, token=builder.token)
extend_module_for_streaming(builder.__module__, download_config=download_config)
# if needed, we also have to extend additional internal imports (like wmt14 -> wmt_utils)
if not builder.__module__.startswith("datasets."): # check that it's not a packaged builder like csv
importable_file = inspect.getfile(builder.__class__)
with lock_importable_file(importable_file):
for imports in get_imports(importable_file):
if imports[0] == "internal":
internal_import_name = imports[1]
internal_module_name = ".".join(builder.__module__.split(".")[:-1] + [internal_import_name])
extend_module_for_streaming(internal_module_name, download_config=download_config)
# builders can inherit from other builders that might use streaming functionality
# (for example, ImageFolder and AudioFolder inherit from FolderBuilder which implements examples generation)
# but these parents builders are not patched automatically as they are not instantiated, so we patch them here
from .builder import DatasetBuilder
parent_builder_modules = [
cls.__module__
for cls in type(builder).__mro__[1:] # make sure it's not the same module we've already patched
if issubclass(cls, DatasetBuilder) and cls.__module__ != DatasetBuilder.__module__
] # check it's not a standard builder from datasets.builder
for module in parent_builder_modules:
extend_module_for_streaming(module, download_config=download_config)
| datasets/src/datasets/streaming.py/0 | {
"file_path": "datasets/src/datasets/streaming.py",
"repo_id": "datasets",
"token_count": 2364
} | 79 |
import enum
import inspect
import warnings
from functools import wraps
from typing import Callable, Optional
from .logging import get_logger
_emitted_deprecation_warnings = set()
logger = get_logger(__name__)
def deprecated(help_message: Optional[str] = None):
"""Decorator to mark a class or a function as deprecated.
Args:
help_message (:obj:`str`, optional): An optional message to guide the user on how to
switch to non-deprecated usage of the library.
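    Example::
        >>> @deprecated("Use `new_function` instead.")
        ... def old_function():
        ...     pass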
"""
def decorator(deprecated_class_or_function: Callable):
global _emitted_deprecation_warnings
if inspect.isclass(deprecated_class_or_function):
deprecated_function = deprecated_class_or_function.__init__
name = deprecated_class_or_function.__name__
else:
deprecated_function = deprecated_class_or_function
name = deprecated_function.__name__
        # Support deprecating the __init__ method of a class: use the class name instead
name = name if name != "__init__" else deprecated_function.__qualname__.split(".")[-2]
        warning_msg = (
            f"{name} is deprecated and will be removed in the next major version of datasets."
            + (f" {help_message}" if help_message else "")
        )
@wraps(deprecated_function)
def wrapper(*args, **kwargs):
func_hash = hash(deprecated_function)
if func_hash not in _emitted_deprecation_warnings:
warnings.warn(warning_msg, category=FutureWarning, stacklevel=2)
_emitted_deprecation_warnings.add(func_hash)
return deprecated_function(*args, **kwargs)
wrapper._decorator_name_ = "deprecated"
if inspect.isclass(deprecated_class_or_function):
deprecated_class_or_function.__init__ = wrapper
return deprecated_class_or_function
else:
return wrapper
return decorator
class OnAccess(enum.EnumMeta):
"""
Enum metaclass that calls a user-specified function whenever a member is accessed.
"""
def __getattribute__(cls, name):
obj = super().__getattribute__(name)
if isinstance(obj, enum.Enum) and obj._on_access:
obj._on_access()
return obj
def __getitem__(cls, name):
member = super().__getitem__(name)
if member._on_access:
member._on_access()
return member
def __call__(cls, value, names=None, *, module=None, qualname=None, type=None, start=1):
obj = super().__call__(value, names, module=module, qualname=qualname, type=type, start=start)
if isinstance(obj, enum.Enum) and obj._on_access:
obj._on_access()
return obj
class DeprecatedEnum(enum.Enum, metaclass=OnAccess):
"""
Enum class that calls `deprecate` method whenever a member is accessed.
"""
def __new__(cls, value):
member = object.__new__(cls)
member._value_ = value
member._on_access = member.deprecate
return member
@property
def help_message(self):
return ""
def deprecate(self):
help_message = f" {self.help_message}" if self.help_message else ""
warnings.warn(
f"'{self.__objclass__.__name__}' is deprecated and will be removed in the next major version of datasets."
+ help_message,
FutureWarning,
stacklevel=3,
)
| datasets/src/datasets/utils/deprecation_utils.py/0 | {
"file_path": "datasets/src/datasets/utils/deprecation_utils.py",
"repo_id": "datasets",
"token_count": 1426
} | 80 |
{
"code": "Programming language (C++, Java, Javascript, Python, etc.)",
"aa": "Afar",
"aaa": "Ghotuo",
"aab": "Alumu-Tesu",
"aac": "Ari",
"aad": "Amal",
"aae": "Arbëreshë Albanian",
"aaf": "Aranadan",
"aag": "Ambrak",
"aah": "Abu' Arapesh",
"aai": "Arifama-Miniafia",
"aak": "Ankave",
"aal": "Afade",
"aan": "Anambé",
"aao": "Algerian Saharan Arabic",
"aap": "Pará Arára",
"aaq": "Eastern Abnaki",
"aas": "Aasáx",
"aat": "Arvanitika Albanian",
"aau": "Abau",
"aav": "Austro-Asiatic languages",
"aaw": "Solong",
"aax": "Mandobo Atas",
"aaz": "Amarasi",
"ab": "Abkhazian",
"aba": "Abé",
"abb": "Bankon",
"abc": "Ambala Ayta",
"abd": "Manide",
"abe": "Western Abnaki",
"abf": "Abai Sungai",
"abg": "Abaga",
"abh": "Tajiki Arabic",
"abi": "Abidji",
"abj": "Aka-Bea",
"abl": "Lampung Nyo",
"abm": "Abanyom",
"abn": "Abua",
"abo": "Abon",
"abp": "Abellen Ayta",
"abq": "Abaza",
"abr": "Abron",
"abs": "Ambonese Malay",
"abt": "Ambulas",
"abu": "Abure",
"abv": "Baharna Arabic",
"abw": "Pal",
"abx": "Inabaknon",
"aby": "Aneme Wake",
"abz": "Abui",
"aca": "Achagua",
"acb": "Áncá",
"acd": "Gikyode",
"ace": "Achinese",
"acf": "Saint Lucian Creole French",
"ach": "Acoli",
"aci": "Aka-Cari",
"ack": "Aka-Kora",
"acl": "Akar-Bale",
"acm": "Mesopotamian Arabic",
"acn": "Achang",
"acp": "Eastern Acipa",
"acq": "Ta'izzi-Adeni Arabic",
"acr": "Achi",
"acs": "Acroá",
"act": "Achterhoeks",
"acu": "Achuar-Shiwiar",
"acv": "Achumawi",
"acw": "Hijazi Arabic",
"acx": "Omani Arabic",
"acy": "Cypriot Arabic",
"acz": "Acheron",
"ada": "Adangme",
"adb": "Atauran",
"add": "Lidzonka; Dzodinka",
"ade": "Adele",
"adf": "Dhofari Arabic",
"adg": "Andegerebinha",
"adh": "Adhola",
"adi": "Adi",
"adj": "Adioukrou",
"adl": "Galo",
"adn": "Adang",
"ado": "Abu",
"adq": "Adangbe",
"adr": "Adonara",
"ads": "Adamorobe Sign Language",
"adt": "Adnyamathanha",
"adu": "Aduge",
"adw": "Amundava",
"adx": "Amdo Tibetan",
"ady": "Adyghe; Adygei",
"adz": "Adzera",
"ae": "Avestan",
"aea": "Areba",
"aeb": "Tunisian Arabic",
"aec": "Saidi Arabic",
"aed": "Argentine Sign Language",
"aee": "Northeast Pashai; Northeast Pashayi",
"aek": "Haeke",
"ael": "Ambele",
"aem": "Arem",
"aen": "Armenian Sign Language",
"aeq": "Aer",
"aer": "Eastern Arrernte",
"aes": "Alsea",
"aeu": "Akeu",
"aew": "Ambakich",
"aey": "Amele",
"aez": "Aeka",
"af": "Afrikaans",
"afa": "Afro-Asiatic languages",
"afb": "Gulf Arabic",
"afd": "Andai",
"afe": "Putukwam",
"afg": "Afghan Sign Language",
"afh": "Afrihili",
"afi": "Akrukay; Chini",
"afk": "Nanubae",
"afn": "Defaka",
"afo": "Eloyi",
"afp": "Tapei",
"afs": "Afro-Seminole Creole",
"aft": "Afitti",
"afu": "Awutu",
"afz": "Obokuitai",
"aga": "Aguano",
"agb": "Legbo",
"agc": "Agatu",
"agd": "Agarabi",
"age": "Angal",
"agf": "Arguni",
"agg": "Angor",
"agh": "Ngelima",
"agi": "Agariya",
"agj": "Argobba",
"agk": "Isarog Agta",
"agl": "Fembe",
"agm": "Angaataha",
"agn": "Agutaynen",
"ago": "Tainae",
"agq": "Aghem",
"agr": "Aguaruna",
"ags": "Esimbi",
"agt": "Central Cagayan Agta",
"agu": "Aguacateco",
"agv": "Remontado Dumagat",
"agw": "Kahua",
"agx": "Aghul",
"agy": "Southern Alta",
"agz": "Mt. Iriga Agta",
"aha": "Ahanta",
"ahb": "Axamb",
"ahg": "Qimant",
"ahh": "Aghu",
"ahi": "Tiagbamrin Aizi",
"ahk": "Akha",
"ahl": "Igo",
"ahm": "Mobumrin Aizi",
"ahn": "Àhàn",
"aho": "Ahom",
"ahp": "Aproumu Aizi",
"ahr": "Ahirani",
"ahs": "Ashe",
"aht": "Ahtena",
"aia": "Arosi",
"aib": "Ainu (China)",
"aic": "Ainbai",
"aid": "Alngith",
"aie": "Amara",
"aif": "Agi",
"aig": "Antigua and Barbuda Creole English",
"aih": "Ai-Cham",
"aii": "Assyrian Neo-Aramaic",
"aij": "Lishanid Noshan",
"aik": "Ake",
"ail": "Aimele",
"aim": "Aimol",
"ain": "Ainu (Japan)",
"aio": "Aiton",
"aip": "Burumakok",
"aiq": "Aimaq",
"air": "Airoran",
"ait": "Arikem",
"aiw": "Aari",
"aix": "Aighon",
"aiy": "Ali",
"aja": "Aja (South Sudan)",
"ajg": "Aja (Benin)",
"aji": "Ajië",
"ajn": "Andajin",
"ajp": "South Levantine Arabic",
"ajs": "Algerian Jewish Sign Language",
"aju": "Judeo-Moroccan Arabic",
"ajw": "Ajawa",
"ajz": "Amri Karbi",
"ak": "Akan",
"akb": "Batak Angkola",
"akc": "Mpur",
"akd": "Ukpet-Ehom",
"ake": "Akawaio",
"akf": "Akpa",
"akg": "Anakalangu",
"akh": "Angal Heneng",
"aki": "Aiome",
"akj": "Aka-Jeru",
"akk": "Akkadian",
"akl": "Aklanon",
"akm": "Aka-Bo",
"ako": "Akurio",
"akp": "Siwu",
"akq": "Ak",
"akr": "Araki",
"aks": "Akaselem",
"akt": "Akolet",
"aku": "Akum",
"akv": "Akhvakh",
"akw": "Akwa",
"akx": "Aka-Kede",
"aky": "Aka-Kol",
"akz": "Alabama",
"ala": "Alago",
"alc": "Qawasqar",
"ald": "Alladian",
"ale": "Aleut",
"alf": "Alege",
"alg": "Algonquian languages",
"alh": "Alawa",
"ali": "Amaimon",
"alj": "Alangan",
"alk": "Alak",
"all": "Allar",
"alm": "Amblong",
"aln": "Gheg Albanian",
"alo": "Larike-Wakasihu",
"alp": "Alune",
"alq": "Algonquin",
"alr": "Alutor",
"als": "Tosk Albanian",
"alt": "Southern Altai",
"alu": "'Are'are",
"alv": "Atlantic-Congo languages",
"alw": "Alaba-K’abeena; Wanbasana",
"alx": "Amol",
"aly": "Alyawarr",
"alz": "Alur",
"am": "Amharic",
"ama": "Amanayé",
"amb": "Ambo",
"amc": "Amahuaca",
"ame": "Yanesha'",
"amf": "Hamer-Banna",
"amg": "Amurdak",
"ami": "Amis",
"amj": "Amdang",
"amk": "Ambai",
"aml": "War-Jaintia",
"amm": "Ama (Papua New Guinea)",
"amn": "Amanab",
"amo": "Amo",
"amp": "Alamblak",
"amq": "Amahai",
"amr": "Amarakaeri",
"ams": "Southern Amami-Oshima",
"amt": "Amto",
"amu": "Guerrero Amuzgo",
"amv": "Ambelau",
"amw": "Western Neo-Aramaic",
"amx": "Anmatyerre",
"amy": "Ami",
"amz": "Atampaya",
"an": "Aragonese",
"ana": "Andaqui",
"anb": "Andoa",
"anc": "Ngas",
"and": "Ansus",
"ane": "Xârâcùù",
"anf": "Animere",
"ang": "Old English (ca. 450-1100)",
"anh": "Nend",
"ani": "Andi",
"anj": "Anor",
"ank": "Goemai",
"anl": "Anu-Hkongso Chin",
"anm": "Anal",
"ann": "Obolo",
"ano": "Andoque",
"anp": "Angika",
"anq": "Jarawa (India)",
"anr": "Andh",
"ans": "Anserma",
"ant": "Antakarinya; Antikarinya",
"anu": "Anuak",
"anv": "Denya",
"anw": "Anaang",
"anx": "Andra-Hus",
"any": "Anyin",
"anz": "Anem",
"aoa": "Angolar",
"aob": "Abom",
"aoc": "Pemon",
"aod": "Andarum",
"aoe": "Angal Enen",
"aof": "Bragat",
"aog": "Angoram",
"aoi": "Anindilyakwa",
"aoj": "Mufian",
"aok": "Arhö",
"aol": "Alor",
"aom": "Ömie",
"aon": "Bumbita Arapesh",
"aor": "Aore",
"aos": "Taikat",
"aot": "Atong (India); A'tong",
"aou": "A'ou",
"aox": "Atorada",
"aoz": "Uab Meto",
"apa": "Apache languages",
"apb": "Sa'a",
"apc": "North Levantine Arabic",
"apd": "Sudanese Arabic",
"ape": "Bukiyip",
"apf": "Pahanan Agta",
"apg": "Ampanang",
"aph": "Athpariya",
"api": "Apiaká",
"apj": "Jicarilla Apache",
"apk": "Kiowa Apache",
"apl": "Lipan Apache",
"apm": "Mescalero-Chiricahua Apache",
"apn": "Apinayé",
"apo": "Ambul",
"app": "Apma",
"apq": "A-Pucikwar",
"apr": "Arop-Lokep",
"aps": "Arop-Sissano",
"apt": "Apatani",
"apu": "Apurinã",
"apv": "Alapmunte",
"apw": "Western Apache",
"apx": "Aputai",
"apy": "Apalaí",
"apz": "Safeyoka",
"aqa": "Alacalufan languages",
"aqc": "Archi",
"aqd": "Ampari Dogon",
"aqg": "Arigidi",
"aqk": "Aninka",
"aql": "Algic languages",
"aqm": "Atohwaim",
"aqn": "Northern Alta",
"aqp": "Atakapa",
"aqr": "Arhâ",
"aqt": "Angaité",
"aqz": "Akuntsu",
"ar": "Arabic",
"arb": "Standard Arabic",
"arc": "Official Aramaic (700-300 BCE); Imperial Aramaic (700-300 BCE)",
"ard": "Arabana",
"are": "Western Arrarnta",
"arh": "Arhuaco",
"ari": "Arikara",
"arj": "Arapaso",
"ark": "Arikapú",
"arl": "Arabela",
"arn": "Mapudungun; Mapuche",
"aro": "Araona",
"arp": "Arapaho",
"arq": "Algerian Arabic",
"arr": "Karo (Brazil)",
"ars": "Najdi Arabic",
"art": "Artificial languages",
"aru": "Aruá (Amazonas State); Arawá",
"arv": "Arbore",
"arw": "Arawak",
"arx": "Aruá (Rodonia State)",
"ary": "Moroccan Arabic",
"arz": "Egyptian Arabic",
"as": "Assamese",
"asa": "Asu (Tanzania)",
"asb": "Assiniboine",
"asc": "Casuarina Coast Asmat",
"ase": "American Sign Language",
"asf": "Auslan; Australian Sign Language",
"asg": "Cishingini",
"ash": "Abishira",
"asi": "Buruwai",
"asj": "Sari",
"ask": "Ashkun",
"asl": "Asilulu",
"asn": "Xingú Asuriní",
"aso": "Dano",
"asp": "Algerian Sign Language",
"asq": "Austrian Sign Language",
"asr": "Asuri",
"ass": "Ipulo",
"ast": "Asturian; Asturleonese; Bable; Leonese",
"asu": "Tocantins Asurini",
"asv": "Asoa",
"asw": "Australian Aborigines Sign Language",
"asx": "Muratayak",
"asy": "Yaosakor Asmat",
"asz": "As",
"ata": "Pele-Ata",
"atb": "Zaiwa",
"atc": "Atsahuaca",
"atd": "Ata Manobo",
"ate": "Atemble",
"atg": "Ivbie North-Okpela-Arhe",
"ath": "Athapascan languages",
"ati": "Attié",
"atj": "Atikamekw",
"atk": "Ati",
"atl": "Mt. Iraya Agta",
"atm": "Ata",
"atn": "Ashtiani",
"ato": "Atong (Cameroon)",
"atp": "Pudtol Atta",
"atq": "Aralle-Tabulahan",
"atr": "Waimiri-Atroari",
"ats": "Gros Ventre",
"att": "Pamplona Atta",
"atu": "Reel",
"atv": "Northern Altai",
"atw": "Atsugewi",
"atx": "Arutani",
"aty": "Aneityum",
"atz": "Arta",
"aua": "Asumboa",
"aub": "Alugu",
"auc": "Waorani",
"aud": "Anuta",
"auf": "Arauan languages",
"aug": "Aguna",
"auh": "Aushi",
"aui": "Anuki",
"auj": "Awjilah",
"auk": "Heyo",
"aul": "Aulua",
"aum": "Asu (Nigeria)",
"aun": "Molmo One",
"auo": "Auyokawa",
"aup": "Makayam",
"auq": "Anus; Korur",
"aur": "Aruek",
"aus": "Australian languages",
"aut": "Austral",
"auu": "Auye",
"auw": "Awyi",
"aux": "Aurá",
"auy": "Awiyaana",
"auz": "Uzbeki Arabic",
"av": "Avaric",
"avb": "Avau",
"avd": "Alviri-Vidari",
"avi": "Avikam",
"avk": "Kotava",
"avl": "Eastern Egyptian Bedawi Arabic",
"avm": "Angkamuthi",
"avn": "Avatime",
"avo": "Agavotaguerra",
"avs": "Aushiri",
"avt": "Au",
"avu": "Avokaya",
"avv": "Avá-Canoeiro",
"awa": "Awadhi",
"awb": "Awa (Papua New Guinea)",
"awc": "Cicipu",
"awd": "Arawakan languages",
"awe": "Awetí",
"awg": "Anguthimri",
"awh": "Awbono",
"awi": "Aekyom",
"awk": "Awabakal",
"awm": "Arawum",
"awn": "Awngi",
"awo": "Awak",
"awr": "Awera",
"aws": "South Awyu",
"awt": "Araweté",
"awu": "Central Awyu",
"awv": "Jair Awyu",
"aww": "Awun",
"awx": "Awara",
"awy": "Edera Awyu",
"axb": "Abipon",
"axe": "Ayerrerenge",
"axg": "Mato Grosso Arára",
"axk": "Yaka (Central African Republic)",
"axl": "Lower Southern Aranda",
"axm": "Middle Armenian",
"axx": "Xârâgurè",
"ay": "Aymara",
"aya": "Awar",
"ayb": "Ayizo Gbe",
"ayc": "Southern Aymara",
"ayd": "Ayabadhu",
"aye": "Ayere",
"ayg": "Ginyanga",
"ayh": "Hadrami Arabic",
"ayi": "Leyigha",
"ayk": "Akuku",
"ayl": "Libyan Arabic",
"ayn": "Sanaani Arabic",
"ayo": "Ayoreo",
"ayp": "North Mesopotamian Arabic",
"ayq": "Ayi (Papua New Guinea)",
"ayr": "Central Aymara",
"ays": "Sorsogon Ayta",
"ayt": "Magbukun Ayta",
"ayu": "Ayu",
"ayz": "Mai Brat",
"az": "Azerbaijani",
"aza": "Azha",
"azb": "South Azerbaijani",
"azc": "Uto-Aztecan languages",
"azd": "Eastern Durango Nahuatl",
"azg": "San Pedro Amuzgos Amuzgo",
"azj": "North Azerbaijani",
"azm": "Ipalapa Amuzgo",
"azn": "Western Durango Nahuatl",
"azo": "Awing",
"azt": "Faire Atta",
"azz": "Highland Puebla Nahuatl",
"ba": "Bashkir",
"baa": "Babatana",
"bab": "Bainouk-Gunyuño",
"bac": "Badui",
"bad": "Banda languages",
"bae": "Baré",
"baf": "Nubaca",
"bag": "Tuki",
"bah": "Bahamas Creole English",
"bai": "Bamileke languages",
"baj": "Barakai",
"bal": "Baluchi",
"ban": "Balinese",
"bao": "Waimaha",
"bap": "Bantawa",
"bar": "Bavarian",
"bas": "Basa (Cameroon)",
"bat": "Baltic languages",
"bau": "Bada (Nigeria)",
"bav": "Vengo",
"baw": "Bambili-Bambui",
"bax": "Bamun",
"bay": "Batuley",
"bba": "Baatonum",
"bbb": "Barai",
"bbc": "Batak Toba",
"bbd": "Bau",
"bbe": "Bangba",
"bbf": "Baibai",
"bbg": "Barama",
"bbh": "Bugan",
"bbi": "Barombi",
"bbj": "Ghomálá'",
"bbk": "Babanki",
"bbl": "Bats",
"bbm": "Babango",
"bbn": "Uneapa",
"bbo": "Northern Bobo Madaré; Konabéré",
"bbp": "West Central Banda",
"bbq": "Bamali",
"bbr": "Girawa",
"bbs": "Bakpinka",
"bbt": "Mburku",
"bbu": "Kulung (Nigeria)",
"bbv": "Karnai",
"bbw": "Baba",
"bbx": "Bubia",
"bby": "Befang",
"bca": "Central Bai",
"bcb": "Bainouk-Samik",
"bcc": "Southern Balochi",
"bcd": "North Babar",
"bce": "Bamenyam",
"bcf": "Bamu",
"bcg": "Baga Pokur",
"bch": "Bariai",
"bci": "Baoulé",
"bcj": "Bardi",
"bck": "Bunuba",
"bcl": "Central Bikol",
"bcm": "Bannoni",
"bcn": "Bali (Nigeria)",
"bco": "Kaluli",
"bcp": "Bali (Democratic Republic of Congo)",
"bcq": "Bench",
"bcr": "Babine",
"bcs": "Kohumono",
"bct": "Bendi",
"bcu": "Awad Bing",
"bcv": "Shoo-Minda-Nye",
"bcw": "Bana",
"bcy": "Bacama",
"bcz": "Bainouk-Gunyaamolo",
"bda": "Bayot",
"bdb": "Basap",
"bdc": "Emberá-Baudó",
"bdd": "Bunama",
"bde": "Bade",
"bdf": "Biage",
"bdg": "Bonggi",
"bdh": "Baka (South Sudan)",
"bdi": "Burun",
"bdj": "Bai (South Sudan); Bai",
"bdk": "Budukh",
"bdl": "Indonesian Bajau",
"bdm": "Buduma",
"bdn": "Baldemu",
"bdo": "Morom",
"bdp": "Bende",
"bdq": "Bahnar",
"bdr": "West Coast Bajau",
"bds": "Burunge",
"bdt": "Bokoto",
"bdu": "Oroko",
"bdv": "Bodo Parja",
"bdw": "Baham",
"bdx": "Budong-Budong",
"bdy": "Bandjalang",
"bdz": "Badeshi",
"be": "Belarusian",
"bea": "Beaver",
"beb": "Bebele",
"bec": "Iceve-Maci",
"bed": "Bedoanas",
"bee": "Byangsi",
"bef": "Benabena",
"beg": "Belait",
"beh": "Biali",
"bei": "Bekati'",
"bej": "Beja; Bedawiyet",
"bek": "Bebeli",
"bem": "Bemba (Zambia)",
"beo": "Beami",
"bep": "Besoa",
"beq": "Beembe",
"ber": "Berber languages",
"bes": "Besme",
"bet": "Guiberoua Béte",
"beu": "Blagar",
"bev": "Daloa Bété",
"bew": "Betawi",
"bex": "Jur Modo",
"bey": "Beli (Papua New Guinea)",
"bez": "Bena (Tanzania)",
"bfa": "Bari",
"bfb": "Pauri Bareli",
"bfc": "Panyi Bai; Northern Bai",
"bfd": "Bafut",
"bfe": "Betaf; Tena",
"bff": "Bofi",
"bfg": "Busang Kayan",
"bfh": "Blafe",
"bfi": "British Sign Language",
"bfj": "Bafanji",
"bfk": "Ban Khor Sign Language",
"bfl": "Banda-Ndélé",
"bfm": "Mmen",
"bfn": "Bunak",
"bfo": "Malba Birifor",
"bfp": "Beba",
"bfq": "Badaga",
"bfr": "Bazigar",
"bfs": "Southern Bai",
"bft": "Balti",
"bfu": "Gahri",
"bfw": "Bondo",
"bfx": "Bantayanon",
"bfy": "Bagheli",
"bfz": "Mahasu Pahari",
"bg": "Bulgarian",
"bga": "Gwamhi-Wuri",
"bgb": "Bobongko",
"bgc": "Haryanvi",
"bgd": "Rathwi Bareli",
"bge": "Bauria",
"bgf": "Bangandu",
"bgg": "Bugun",
"bgi": "Giangan",
"bgj": "Bangolan",
"bgk": "Bit; Buxinhua",
"bgl": "Bo (Laos)",
"bgn": "Western Balochi",
"bgo": "Baga Koga",
"bgp": "Eastern Balochi",
"bgq": "Bagri",
"bgr": "Bawm Chin",
"bgs": "Tagabawa",
"bgt": "Bughotu",
"bgu": "Mbongno",
"bgv": "Warkay-Bipim",
"bgw": "Bhatri",
"bgx": "Balkan Gagauz Turkish",
"bgy": "Benggoi",
"bgz": "Banggai",
"bh": "Bihari languages",
"bha": "Bharia",
"bhb": "Bhili",
"bhc": "Biga",
"bhd": "Bhadrawahi",
"bhe": "Bhaya",
"bhf": "Odiai",
"bhg": "Binandere",
"bhh": "Bukharic",
"bhi": "Bhilali",
"bhj": "Bahing",
"bhl": "Bimin",
"bhm": "Bathari",
"bhn": "Bohtan Neo-Aramaic",
"bho": "Bhojpuri",
"bhp": "Bima",
"bhq": "Tukang Besi South",
"bhr": "Bara Malagasy",
"bhs": "Buwal",
"bht": "Bhattiyali",
"bhu": "Bhunjia",
"bhv": "Bahau",
"bhw": "Biak",
"bhx": "Bhalay",
"bhy": "Bhele",
"bhz": "Bada (Indonesia)",
"bi": "Bislama",
"bia": "Badimaya",
"bib": "Bissa; Bisa",
"bid": "Bidiyo",
"bie": "Bepour",
"bif": "Biafada",
"big": "Biangai",
"bik": "Bikol",
"bil": "Bile",
"bim": "Bimoba",
"bin": "Bini; Edo",
"bio": "Nai",
"bip": "Bila",
"biq": "Bipi",
"bir": "Bisorio",
"bit": "Berinomo",
"biu": "Biete",
"biv": "Southern Birifor",
"biw": "Kol (Cameroon)",
"bix": "Bijori",
"biy": "Birhor",
"biz": "Baloi",
"bja": "Budza",
"bjb": "Banggarla",
"bjc": "Bariji",
"bje": "Biao-Jiao Mien",
"bjf": "Barzani Jewish Neo-Aramaic",
"bjg": "Bidyogo",
"bjh": "Bahinemo",
"bji": "Burji",
"bjj": "Kanauji",
"bjk": "Barok",
"bjl": "Bulu (Papua New Guinea)",
"bjm": "Bajelani",
"bjn": "Banjar",
"bjo": "Mid-Southern Banda",
"bjp": "Fanamaket",
"bjr": "Binumarien",
"bjs": "Bajan",
"bjt": "Balanta-Ganja",
"bju": "Busuu",
"bjv": "Bedjond",
"bjw": "Bakwé",
"bjx": "Banao Itneg",
"bjy": "Bayali",
"bjz": "Baruga",
"bka": "Kyak",
"bkc": "Baka (Cameroon)",
"bkd": "Binukid; Talaandig",
"bkf": "Beeke",
"bkg": "Buraka",
"bkh": "Bakoko",
"bki": "Baki",
"bkj": "Pande",
"bkk": "Brokskat",
"bkl": "Berik",
"bkm": "Kom (Cameroon)",
"bkn": "Bukitan",
"bko": "Kwa'",
"bkp": "Boko (Democratic Republic of Congo)",
"bkq": "Bakairí",
"bkr": "Bakumpai",
"bks": "Northern Sorsoganon",
"bkt": "Boloki",
"bku": "Buhid",
"bkv": "Bekwarra",
"bkw": "Bekwel",
"bkx": "Baikeno",
"bky": "Bokyi",
"bkz": "Bungku",
"bla": "Siksika",
"blb": "Bilua",
"blc": "Bella Coola",
"bld": "Bolango",
"ble": "Balanta-Kentohe",
"blf": "Buol",
"blh": "Kuwaa",
"bli": "Bolia",
"blj": "Bolongan",
"blk": "Pa'o Karen; Pa'O",
"bll": "Biloxi",
"blm": "Beli (South Sudan)",
"bln": "Southern Catanduanes Bikol",
"blo": "Anii",
"blp": "Blablanga",
"blq": "Baluan-Pam",
"blr": "Blang",
"bls": "Balaesang",
"blt": "Tai Dam",
"blv": "Kibala; Bolo",
"blw": "Balangao",
"blx": "Mag-Indi Ayta",
"bly": "Notre",
"blz": "Balantak",
"bm": "Bambara",
"bma": "Lame",
"bmb": "Bembe",
"bmc": "Biem",
"bmd": "Baga Manduri",
"bme": "Limassa",
"bmf": "Bom-Kim",
"bmg": "Bamwe",
"bmh": "Kein",
"bmi": "Bagirmi",
"bmj": "Bote-Majhi",
"bmk": "Ghayavi",
"bml": "Bomboli",
"bmm": "Northern Betsimisaraka Malagasy",
"bmn": "Bina (Papua New Guinea)",
"bmo": "Bambalang",
"bmp": "Bulgebi",
"bmq": "Bomu",
"bmr": "Muinane",
"bms": "Bilma Kanuri",
"bmt": "Biao Mon",
"bmu": "Somba-Siawari",
"bmv": "Bum",
"bmw": "Bomwali",
"bmx": "Baimak",
"bmz": "Baramu",
"bn": "Bengali; Bangla",
"bna": "Bonerate",
"bnb": "Bookan",
"bnc": "Bontok",
"bnd": "Banda (Indonesia)",
"bne": "Bintauna",
"bnf": "Masiwang",
"bng": "Benga",
"bni": "Bangi",
"bnj": "Eastern Tawbuid",
"bnk": "Bierebo",
"bnl": "Boon",
"bnm": "Batanga",
"bnn": "Bunun",
"bno": "Bantoanon",
"bnp": "Bola",
"bnq": "Bantik",
"bnr": "Butmas-Tur",
"bns": "Bundeli",
"bnt": "Bantu languages",
"bnu": "Bentong",
"bnv": "Bonerif; Beneraf; Edwas",
"bnw": "Bisis",
"bnx": "Bangubangu",
"bny": "Bintulu",
"bnz": "Beezen",
"bo": "Tibetan",
"boa": "Bora",
"bob": "Aweer",
"boe": "Mundabli",
"bof": "Bolon",
"bog": "Bamako Sign Language",
"boh": "Boma",
"boi": "Barbareño",
"boj": "Anjam",
"bok": "Bonjo",
"bol": "Bole",
"bom": "Berom",
"bon": "Bine",
"boo": "Tiemacèwè Bozo",
"bop": "Bonkiman",
"boq": "Bogaya",
"bor": "Borôro",
"bot": "Bongo",
"bou": "Bondei",
"bov": "Tuwuli",
"bow": "Rema",
"box": "Buamu",
"boy": "Bodo (Central African Republic)",
"boz": "Tiéyaxo Bozo",
"bpa": "Daakaka",
"bpc": "Mbuk",
"bpd": "Banda-Banda",
"bpe": "Bauni",
"bpg": "Bonggo",
"bph": "Botlikh",
"bpi": "Bagupi",
"bpj": "Binji",
"bpk": "Orowe; 'Ôrôê",
"bpl": "Broome Pearling Lugger Pidgin",
"bpm": "Biyom",
"bpn": "Dzao Min",
"bpo": "Anasi",
"bpp": "Kaure",
"bpq": "Banda Malay",
"bpr": "Koronadal Blaan",
"bps": "Sarangani Blaan",
"bpt": "Barrow Point",
"bpu": "Bongu",
"bpv": "Bian Marind",
"bpw": "Bo (Papua New Guinea)",
"bpx": "Palya Bareli",
"bpy": "Bishnupriya",
"bpz": "Bilba",
"bqa": "Tchumbuli",
"bqb": "Bagusa",
"bqc": "Boko (Benin); Boo",
"bqd": "Bung",
"bqf": "Baga Kaloum",
"bqg": "Bago-Kusuntu",
"bqh": "Baima",
"bqi": "Bakhtiari",
"bqj": "Bandial",
"bqk": "Banda-Mbrès",
"bql": "Bilakura",
"bqm": "Wumboko",
"bqn": "Bulgarian Sign Language",
"bqo": "Balo",
"bqp": "Busa",
"bqq": "Biritai",
"bqr": "Burusu",
"bqs": "Bosngun",
"bqt": "Bamukumbit",
"bqu": "Boguru",
"bqv": "Koro Wachi; Begbere-Ejar",
"bqw": "Buru (Nigeria)",
"bqx": "Baangi",
"bqy": "Bengkala Sign Language",
"bqz": "Bakaka",
"br": "Breton",
"bra": "Braj",
"brb": "Brao; Lave",
"brc": "Berbice Creole Dutch",
"brd": "Baraamu",
"brf": "Bira",
"brg": "Baure",
"brh": "Brahui",
"bri": "Mokpwe",
"brj": "Bieria",
"brk": "Birked",
"brl": "Birwa",
"brm": "Barambu",
"brn": "Boruca",
"bro": "Brokkat",
"brp": "Barapasi",
"brq": "Breri",
"brr": "Birao",
"brs": "Baras",
"brt": "Bitare",
"bru": "Eastern Bru",
"brv": "Western Bru",
"brw": "Bellari",
"brx": "Bodo (India)",
"bry": "Burui",
"brz": "Bilbil",
"bs": "Bosnian",
"bsa": "Abinomn",
"bsb": "Brunei Bisaya",
"bsc": "Bassari; Oniyan",
"bse": "Wushi",
"bsf": "Bauchi",
"bsg": "Bashkardi",
"bsh": "Kati",
"bsi": "Bassossi",
"bsj": "Bangwinji",
"bsk": "Burushaski",
"bsl": "Basa-Gumna",
"bsm": "Busami",
"bsn": "Barasana-Eduria",
"bso": "Buso",
"bsp": "Baga Sitemu",
"bsq": "Bassa",
"bsr": "Bassa-Kontagora",
"bss": "Akoose",
"bst": "Basketo",
"bsu": "Bahonsuai",
"bsv": "Baga Sobané",
"bsw": "Baiso",
"bsx": "Yangkam",
"bsy": "Sabah Bisaya",
"bta": "Bata",
"btc": "Bati (Cameroon)",
"btd": "Batak Dairi",
"bte": "Gamo-Ningi",
"btf": "Birgit",
"btg": "Gagnoa Bété",
"bth": "Biatah Bidayuh",
"bti": "Burate",
"btj": "Bacanese Malay",
"btk": "Batak languages",
"btm": "Batak Mandailing",
"btn": "Ratagnon",
"bto": "Rinconada Bikol",
"btp": "Budibud",
"btq": "Batek",
"btr": "Baetora",
"bts": "Batak Simalungun",
"btt": "Bete-Bendi",
"btu": "Batu",
"btv": "Bateri",
"btw": "Butuanon",
"btx": "Batak Karo",
"bty": "Bobot",
"btz": "Batak Alas-Kluet",
"bua": "Buriat",
"bub": "Bua",
"buc": "Bushi",
"bud": "Ntcham",
"bue": "Beothuk",
"buf": "Bushoong",
"bug": "Buginese",
"buh": "Younuo Bunu",
"bui": "Bongili",
"buj": "Basa-Gurmana",
"buk": "Bugawac",
"bum": "Bulu (Cameroon)",
"bun": "Sherbro",
"buo": "Terei",
"bup": "Busoa",
"buq": "Brem",
"bus": "Bokobaru",
"but": "Bungain",
"buu": "Budu",
"buv": "Bun",
"buw": "Bubi",
"bux": "Boghom",
"buy": "Bullom So",
"buz": "Bukwen",
"bva": "Barein",
"bvb": "Bube",
"bvc": "Baelelea",
"bvd": "Baeggu",
"bve": "Berau Malay",
"bvf": "Boor",
"bvg": "Bonkeng",
"bvh": "Bure",
"bvi": "Belanda Viri",
"bvj": "Baan",
"bvk": "Bukat",
"bvl": "Bolivian Sign Language",
"bvm": "Bamunka",
"bvn": "Buna",
"bvo": "Bolgo",
"bvp": "Bumang",
"bvq": "Birri",
"bvr": "Burarra",
"bvt": "Bati (Indonesia)",
"bvu": "Bukit Malay",
"bvv": "Baniva",
"bvw": "Boga",
"bvx": "Dibole",
"bvy": "Baybayanon",
"bvz": "Bauzi",
"bwa": "Bwatoo",
"bwb": "Namosi-Naitasiri-Serua",
"bwc": "Bwile",
"bwd": "Bwaidoka",
"bwe": "Bwe Karen",
"bwf": "Boselewa",
"bwg": "Barwe",
"bwh": "Bishuo",
"bwi": "Baniwa",
"bwj": "Láá Láá Bwamu",
"bwk": "Bauwaki",
"bwl": "Bwela",
"bwm": "Biwat",
"bwn": "Wunai Bunu",
"bwo": "Boro (Ethiopia); Borna (Ethiopia)",
"bwp": "Mandobo Bawah",
"bwq": "Southern Bobo Madaré",
"bwr": "Bura-Pabir",
"bws": "Bomboma",
"bwt": "Bafaw-Balong",
"bwu": "Buli (Ghana)",
"bww": "Bwa",
"bwx": "Bu-Nao Bunu",
"bwy": "Cwi Bwamu",
"bwz": "Bwisi",
"bxa": "Tairaha",
"bxb": "Belanda Bor",
"bxc": "Molengue",
"bxd": "Pela",
"bxe": "Birale",
"bxf": "Bilur; Minigir",
"bxg": "Bangala",
"bxh": "Buhutu",
"bxi": "Pirlatapa",
"bxj": "Bayungu",
"bxk": "Bukusu; Lubukusu",
"bxl": "Jalkunan",
"bxm": "Mongolia Buriat",
"bxn": "Burduna",
"bxo": "Barikanchi",
"bxp": "Bebil",
"bxq": "Beele",
"bxr": "Russia Buriat",
"bxs": "Busam",
"bxu": "China Buriat",
"bxv": "Berakou",
"bxw": "Bankagooma",
"bxz": "Binahari",
"bya": "Batak",
"byb": "Bikya",
"byc": "Ubaghara",
"byd": "Benyadu'",
"bye": "Pouye",
"byf": "Bete",
"byg": "Baygo",
"byh": "Bhujel",
"byi": "Buyu",
"byj": "Bina (Nigeria)",
"byk": "Biao",
"byl": "Bayono",
"bym": "Bidjara",
"byn": "Bilin; Blin",
"byo": "Biyo",
"byp": "Bumaji",
"byq": "Basay",
"byr": "Baruya; Yipma",
"bys": "Burak",
"byt": "Berti",
"byv": "Medumba",
"byw": "Belhariya",
"byx": "Qaqet",
"byz": "Banaro",
"bza": "Bandi",
"bzb": "Andio",
"bzc": "Southern Betsimisaraka Malagasy",
"bzd": "Bribri",
"bze": "Jenaama Bozo",
"bzf": "Boikin",
"bzg": "Babuza",
"bzh": "Mapos Buang",
"bzi": "Bisu",
"bzj": "Belize Kriol English",
"bzk": "Nicaragua Creole English",
"bzl": "Boano (Sulawesi)",
"bzm": "Bolondo",
"bzn": "Boano (Maluku)",
"bzo": "Bozaba",
"bzp": "Kemberano",
"bzq": "Buli (Indonesia)",
"bzr": "Biri",
"bzs": "Brazilian Sign Language",
"bzt": "Brithenig",
"bzu": "Burmeso",
"bzv": "Naami",
"bzw": "Basa (Nigeria)",
"bzx": "Kɛlɛngaxo Bozo",
"bzy": "Obanliku",
"bzz": "Evant",
"ca": "Catalan; Valencian",
"caa": "Chortí",
"cab": "Garifuna",
"cac": "Chuj",
"cad": "Caddo",
"cae": "Lehar; Laalaa",
"caf": "Southern Carrier",
"cag": "Nivaclé",
"cah": "Cahuarano",
"cai": "Central American Indian languages",
"caj": "Chané",
"cak": "Kaqchikel; Cakchiquel",
"cal": "Carolinian",
"cam": "Cemuhî",
"can": "Chambri",
"cao": "Chácobo",
"cap": "Chipaya",
"caq": "Car Nicobarese",
"car": "Galibi Carib",
"cas": "Tsimané",
"cau": "Caucasian languages",
"cav": "Cavineña",
"caw": "Callawalla",
"cax": "Chiquitano",
"cay": "Cayuga",
"caz": "Canichana",
"cba": "Chibchan languages",
"cbb": "Cabiyarí",
"cbc": "Carapana",
"cbd": "Carijona",
"cbg": "Chimila",
"cbi": "Chachi",
"cbj": "Ede Cabe",
"cbk": "Chavacano",
"cbl": "Bualkhaw Chin",
"cbn": "Nyahkur",
"cbo": "Izora",
"cbq": "Tsucuba; Cuba",
"cbr": "Cashibo-Cacataibo",
"cbs": "Cashinahua",
"cbt": "Chayahuita",
"cbu": "Candoshi-Shapra",
"cbv": "Cacua",
"cbw": "Kinabalian",
"cby": "Carabayo",
"ccc": "Chamicuro",
"ccd": "Cafundo Creole",
"cce": "Chopi",
"ccg": "Samba Daka",
"cch": "Atsam",
"ccj": "Kasanga",
"ccl": "Cutchi-Swahili",
"ccm": "Malaccan Creole Malay",
"ccn": "North Caucasian languages",
"cco": "Comaltepec Chinantec",
"ccp": "Chakma",
"ccr": "Cacaopera",
"ccs": "South Caucasian languages",
"cda": "Choni",
"cdc": "Chadic languages",
"cdd": "Caddoan languages",
"cde": "Chenchu",
"cdf": "Chiru",
"cdh": "Chambeali",
"cdi": "Chodri",
"cdj": "Churahi",
"cdm": "Chepang",
"cdn": "Chaudangsi",
"cdo": "Min Dong Chinese",
"cdr": "Cinda-Regi-Tiyal",
"cds": "Chadian Sign Language",
"cdy": "Chadong",
"cdz": "Koda",
"ce": "Chechen",
"cea": "Lower Chehalis",
"ceb": "Cebuano",
"ceg": "Chamacoco",
"cek": "Eastern Khumi Chin",
"cel": "Celtic languages",
"cen": "Cen",
"cet": "Centúúm",
"cey": "Ekai Chin",
"cfa": "Dijim-Bwilim",
"cfd": "Cara",
"cfg": "Como Karim",
"cfm": "Falam Chin",
"cga": "Changriwa",
"cgc": "Kagayanen",
"cgg": "Chiga",
"cgk": "Chocangacakha",
"ch": "Chamorro",
"chb": "Chibcha",
"chc": "Catawba",
"chd": "Highland Oaxaca Chontal",
"chf": "Tabasco Chontal",
"chg": "Chagatai",
"chh": "Chinook",
"chj": "Ojitlán Chinantec",
"chk": "Chuukese",
"chl": "Cahuilla",
"chm": "Mari (Russia)",
"chn": "Chinook jargon",
"cho": "Choctaw",
"chp": "Chipewyan; Dene Suline",
"chq": "Quiotepec Chinantec",
"chr": "Cherokee",
"cht": "Cholón",
"chw": "Chuwabu",
"chx": "Chantyal",
"chy": "Cheyenne",
"chz": "Ozumacín Chinantec",
"cia": "Cia-Cia",
"cib": "Ci Gbe",
"cic": "Chickasaw",
"cid": "Chimariko",
"cie": "Cineni",
"cih": "Chinali",
"cik": "Chitkuli Kinnauri",
"cim": "Cimbrian",
"cin": "Cinta Larga",
"cip": "Chiapanec",
"cir": "Tiri; Haméa; Méa",
"ciw": "Chippewa",
"ciy": "Chaima",
"cja": "Western Cham",
"cje": "Chru",
"cjh": "Upper Chehalis",
"cji": "Chamalal",
"cjk": "Chokwe",
"cjm": "Eastern Cham",
"cjn": "Chenapian",
"cjo": "Ashéninka Pajonal",
"cjp": "Cabécar",
"cjs": "Shor",
"cjv": "Chuave",
"cjy": "Jinyu Chinese",
"ckb": "Central Kurdish",
"ckh": "Chak",
"ckl": "Cibak",
"ckm": "Chakavian",
"ckn": "Kaang Chin",
"cko": "Anufo",
"ckq": "Kajakse",
"ckr": "Kairak",
"cks": "Tayo",
"ckt": "Chukot",
"cku": "Koasati",
"ckv": "Kavalan",
"ckx": "Caka",
"cky": "Cakfem-Mushere",
"ckz": "Cakchiquel-Quiché Mixed Language",
"cla": "Ron",
"clc": "Chilcotin",
"cld": "Chaldean Neo-Aramaic",
"cle": "Lealao Chinantec",
"clh": "Chilisso",
"cli": "Chakali",
"clj": "Laitu Chin",
"clk": "Idu-Mishmi",
"cll": "Chala",
"clm": "Clallam",
"clo": "Lowland Oaxaca Chontal",
"clt": "Lautu Chin",
"clu": "Caluyanun",
"clw": "Chulym",
"cly": "Eastern Highland Chatino",
"cma": "Maa",
"cmc": "Chamic languages",
"cme": "Cerma",
"cmg": "Classical Mongolian",
"cmi": "Emberá-Chamí",
"cml": "Campalagian",
"cmm": "Michigamea",
"cmn": "Mandarin Chinese",
"cmo": "Central Mnong",
"cmr": "Mro-Khimi Chin",
"cms": "Messapic",
"cmt": "Camtho",
"cna": "Changthang",
"cnb": "Chinbon Chin",
"cnc": "Côông",
"cng": "Northern Qiang",
"cnh": "Hakha Chin; Haka Chin",
"cni": "Asháninka",
"cnk": "Khumi Chin",
"cnl": "Lalana Chinantec",
"cno": "Con",
"cnp": "Northern Ping Chinese; Northern Pinghua",
"cnq": "Chung",
"cnr": "Montenegrin",
"cns": "Central Asmat",
"cnt": "Tepetotutla Chinantec",
"cnu": "Chenoua",
"cnw": "Ngawn Chin",
"cnx": "Middle Cornish",
"co": "Corsican",
"coa": "Cocos Islands Malay",
"cob": "Chicomuceltec",
"coc": "Cocopa",
"cod": "Cocama-Cocamilla",
"coe": "Koreguaje",
"cof": "Colorado",
"cog": "Chong",
"coh": "Chonyi-Dzihana-Kauma; Chichonyi-Chidzihana-Chikauma",
"coj": "Cochimi",
"cok": "Santa Teresa Cora",
"col": "Columbia-Wenatchi",
"com": "Comanche",
"con": "Cofán",
"coo": "Comox",
"cop": "Coptic",
"coq": "Coquille",
"cot": "Caquinte",
"cou": "Wamey",
"cov": "Cao Miao",
"cow": "Cowlitz",
"cox": "Nanti",
"coz": "Chochotec",
"cpa": "Palantla Chinantec",
"cpb": "Ucayali-Yurúa Ashéninka",
"cpc": "Ajyíninka Apurucayali",
"cpe": "English-based creoles and pidgins",
"cpf": "French-based creoles and pidgins",
"cpg": "Cappadocian Greek",
"cpi": "Chinese Pidgin English",
"cpn": "Cherepon",
"cpo": "Kpeego",
"cpp": "Portuguese-based creoles and pidgins",
"cps": "Capiznon",
"cpu": "Pichis Ashéninka",
"cpx": "Pu-Xian Chinese",
"cpy": "South Ucayali Ashéninka",
"cqd": "Chuanqiandian Cluster Miao",
"cr": "Cree",
"cra": "Chara",
"crb": "Island Carib",
"crc": "Lonwolwol",
"crd": "Coeur d'Alene",
"crf": "Caramanta",
"crg": "Michif",
"crh": "Crimean Tatar; Crimean Turkish",
"cri": "Sãotomense",
"crj": "Southern East Cree",
"crk": "Plains Cree",
"crl": "Northern East Cree",
"crm": "Moose Cree",
"crn": "El Nayar Cora",
"cro": "Crow",
"crp": "Creoles and pidgins",
"crq": "Iyo'wujwa Chorote",
"crr": "Carolina Algonquian",
"crs": "Seselwa Creole French",
"crt": "Iyojwa'ja Chorote",
"crv": "Chaura",
"crw": "Chrau",
"crx": "Carrier",
"cry": "Cori",
"crz": "Cruzeño",
"cs": "Czech",
"csa": "Chiltepec Chinantec",
"csb": "Kashubian",
"csc": "Catalan Sign Language; Lengua de señas catalana; Llengua de Signes Catalana",
"csd": "Chiangmai Sign Language",
"cse": "Czech Sign Language",
"csf": "Cuba Sign Language",
"csg": "Chilean Sign Language",
"csh": "Asho Chin",
"csi": "Coast Miwok",
"csj": "Songlai Chin",
"csk": "Jola-Kasa",
"csl": "Chinese Sign Language",
"csm": "Central Sierra Miwok",
"csn": "Colombian Sign Language",
"cso": "Sochiapam Chinantec; Sochiapan Chinantec",
"csp": "Southern Ping Chinese; Southern Pinghua",
"csq": "Croatia Sign Language",
"csr": "Costa Rican Sign Language",
"css": "Southern Ohlone",
"cst": "Northern Ohlone",
"csu": "Central Sudanic languages",
"csv": "Sumtu Chin",
"csw": "Swampy Cree",
"csx": "Cambodian Sign Language",
"csy": "Siyin Chin",
"csz": "Coos",
"cta": "Tataltepec Chatino",
"ctc": "Chetco",
"ctd": "Tedim Chin",
"cte": "Tepinapa Chinantec",
"ctg": "Chittagonian",
"cth": "Thaiphum Chin",
"ctl": "Tlacoatzintepec Chinantec",
"ctm": "Chitimacha",
"ctn": "Chhintange",
"cto": "Emberá-Catío",
"ctp": "Western Highland Chatino",
"cts": "Northern Catanduanes Bikol",
"ctt": "Wayanad Chetti",
"ctu": "Chol",
"cty": "Moundadan Chetty",
"ctz": "Zacatepec Chatino",
"cu": "Church Slavic; Church Slavonic; Old Bulgarian; Old Church Slavonic; Old Slavonic",
"cua": "Cua",
"cub": "Cubeo",
"cuc": "Usila Chinantec",
"cuh": "Chuka; Gichuka",
"cui": "Cuiba",
"cuj": "Mashco Piro",
"cuk": "San Blas Kuna",
"cul": "Culina; Kulina",
"cuo": "Cumanagoto",
"cup": "Cupeño",
"cuq": "Cun",
"cur": "Chhulung",
"cus": "Cushitic languages",
"cut": "Teutila Cuicatec",
"cuu": "Tai Ya",
"cuv": "Cuvok",
"cuw": "Chukwa",
"cux": "Tepeuxila Cuicatec",
"cuy": "Cuitlatec",
"cv": "Chuvash",
"cvg": "Chug",
"cvn": "Valle Nacional Chinantec",
"cwa": "Kabwa",
"cwb": "Maindo",
"cwd": "Woods Cree",
"cwe": "Kwere",
"cwg": "Chewong; Cheq Wong",
"cwt": "Kuwaataay",
"cy": "Welsh",
"cya": "Nopala Chatino",
"cyb": "Cayubaba",
"cyo": "Cuyonon",
"czh": "Huizhou Chinese",
"czk": "Knaanic",
"czn": "Zenzontepec Chatino",
"czo": "Min Zhong Chinese",
"czt": "Zotung Chin",
"da": "Danish",
"daa": "Dangaléat",
"dac": "Dambi",
"dad": "Marik",
"dae": "Duupa",
"dag": "Dagbani",
"dah": "Gwahatike",
"dai": "Day",
"daj": "Dar Fur Daju",
"dak": "Dakota",
"dal": "Dahalo",
"dam": "Damakawa",
"dao": "Daai Chin",
"daq": "Dandami Maria",
"dar": "Dargwa",
"das": "Daho-Doo",
"dau": "Dar Sila Daju",
"dav": "Taita; Dawida",
"daw": "Davawenyo",
"dax": "Dayi",
"day": "Land Dayak languages",
"daz": "Dao",
"dba": "Bangime",
"dbb": "Deno",
"dbd": "Dadiya",
"dbe": "Dabe",
"dbf": "Edopi",
"dbg": "Dogul Dom Dogon",
"dbi": "Doka",
"dbj": "Ida'an",
"dbl": "Dyirbal",
"dbm": "Duguri",
"dbn": "Duriankere",
"dbo": "Dulbu",
"dbp": "Duwai",
"dbq": "Daba",
"dbr": "Dabarre",
"dbt": "Ben Tey Dogon",
"dbu": "Bondum Dom Dogon",
"dbv": "Dungu",
"dbw": "Bankan Tey Dogon",
"dby": "Dibiyaso",
"dcc": "Deccan",
"dcr": "Negerhollands",
"dda": "Dadi Dadi",
"ddd": "Dongotono",
"dde": "Doondo",
"ddg": "Fataluku",
"ddi": "West Goodenough",
"ddj": "Jaru",
"ddn": "Dendi (Benin)",
"ddo": "Dido",
"ddr": "Dhudhuroa",
"dds": "Donno So Dogon",
"ddw": "Dawera-Daweloor",
"de": "German",
"dec": "Dagik",
"ded": "Dedua",
"dee": "Dewoin",
"def": "Dezfuli",
"deg": "Degema",
"deh": "Dehwari",
"dei": "Demisa",
"dek": "Dek",
"del": "Delaware",
"dem": "Dem",
"den": "Slave (Athapascan)",
"dep": "Pidgin Delaware",
"deq": "Dendi (Central African Republic)",
"der": "Deori",
"des": "Desano",
"dev": "Domung",
"dez": "Dengese",
"dga": "Southern Dagaare",
"dgb": "Bunoge Dogon",
"dgc": "Casiguran Dumagat Agta",
"dgd": "Dagaari Dioula",
"dge": "Degenan",
"dgg": "Doga",
"dgh": "Dghwede",
"dgi": "Northern Dagara",
"dgk": "Dagba",
"dgl": "Andaandi; Dongolawi",
"dgn": "Dagoman",
"dgo": "Dogri (individual language)",
"dgr": "Dogrib; Tłı̨chǫ",
"dgs": "Dogoso",
"dgt": "Ndra'ngith",
"dgw": "Daungwurrung",
"dgx": "Doghoro",
"dgz": "Daga",
"dhd": "Dhundari",
"dhg": "Dhangu-Djangu; Dhangu; Djangu",
"dhi": "Dhimal",
"dhl": "Dhalandji",
"dhm": "Zemba",
"dhn": "Dhanki",
"dho": "Dhodia",
"dhr": "Dhargari",
"dhs": "Dhaiso",
"dhu": "Dhurga",
"dhv": "Dehu; Drehu",
"dhw": "Dhanwar (Nepal)",
"dhx": "Dhungaloo",
"dia": "Dia",
"dib": "South Central Dinka",
"dic": "Lakota Dida",
"did": "Didinga",
"dif": "Dieri; Diyari",
"dig": "Digo; Chidigo",
"dih": "Kumiai",
"dii": "Dimbong",
"dij": "Dai",
"dik": "Southwestern Dinka",
"dil": "Dilling",
"dim": "Dime",
"din": "Dinka",
"dio": "Dibo",
"dip": "Northeastern Dinka",
"diq": "Dimli (individual language)",
"dir": "Dirim",
"dis": "Dimasa",
"diu": "Diriku",
"diw": "Northwestern Dinka",
"dix": "Dixon Reef",
"diy": "Diuwe",
"diz": "Ding",
"dja": "Djadjawurrung",
"djb": "Djinba",
"djc": "Dar Daju Daju",
"djd": "Djamindjung; Ngaliwurru",
"dje": "Zarma",
"djf": "Djangun",
"dji": "Djinang",
"djj": "Djeebbana",
"djk": "Eastern Maroon Creole; Businenge Tongo; Nenge",
"djm": "Jamsay Dogon",
"djn": "Jawoyn; Djauan",
"djo": "Jangkang",
"djr": "Djambarrpuyngu",
"dju": "Kapriman",
"djw": "Djawi",
"dka": "Dakpakha",
"dkg": "Kadung",
"dkk": "Dakka",
"dkr": "Kuijau",
"dks": "Southeastern Dinka",
"dkx": "Mazagway",
"dlg": "Dolgan",
"dlk": "Dahalik",
"dlm": "Dalmatian",
"dln": "Darlong",
"dma": "Duma",
"dmb": "Mombo Dogon",
"dmc": "Gavak",
"dmd": "Madhi Madhi",
"dme": "Dugwor",
"dmf": "Medefaidrin",
"dmg": "Upper Kinabatangan",
"dmk": "Domaaki",
"dml": "Dameli",
"dmm": "Dama",
"dmn": "Mande languages",
"dmo": "Kemedzung",
"dmr": "East Damar",
"dms": "Dampelas",
"dmu": "Dubu; Tebi",
"dmv": "Dumpas",
"dmw": "Mudburra",
"dmx": "Dema",
"dmy": "Demta; Sowari",
"dna": "Upper Grand Valley Dani",
"dnd": "Daonda",
"dne": "Ndendeule",
"dng": "Dungan",
"dni": "Lower Grand Valley Dani",
"dnj": "Dan",
"dnk": "Dengka",
"dnn": "Dzùùngoo",
"dno": "Ndrulo; Northern Lendu",
"dnr": "Danaru",
"dnt": "Mid Grand Valley Dani",
"dnu": "Danau",
"dnv": "Danu",
"dnw": "Western Dani",
"dny": "Dení",
"doa": "Dom",
"dob": "Dobu",
"doc": "Northern Dong",
"doe": "Doe",
"dof": "Domu",
"doh": "Dong",
"doi": "Dogri (macrolanguage)",
"dok": "Dondo",
"dol": "Doso",
"don": "Toura (Papua New Guinea)",
"doo": "Dongo",
"dop": "Lukpa",
"doq": "Dominican Sign Language",
"dor": "Dori'o",
"dos": "Dogosé",
"dot": "Dass",
"dov": "Dombe",
"dow": "Doyayo",
"dox": "Bussa",
"doy": "Dompo",
"doz": "Dorze",
"dpp": "Papar",
"dra": "Dravidian languages",
"drb": "Dair",
"drc": "Minderico",
"drd": "Darmiya",
"dre": "Dolpo",
"drg": "Rungus",
"dri": "C'Lela",
"drl": "Paakantyi",
"drn": "West Damar",
"dro": "Daro-Matu Melanau",
"drq": "Dura",
"drs": "Gedeo",
"drt": "Drents",
"dru": "Rukai",
"dry": "Darai",
"dsb": "Lower Sorbian",
"dse": "Dutch Sign Language",
"dsh": "Daasanach",
"dsi": "Disa",
"dsl": "Danish Sign Language",
"dsn": "Dusner",
"dso": "Desiya",
"dsq": "Tadaksahak",
"dsz": "Mardin Sign Language",
"dta": "Daur",
"dtb": "Labuk-Kinabatangan Kadazan",
"dtd": "Ditidaht",
"dth": "Adithinngithigh",
"dti": "Ana Tinga Dogon",
"dtk": "Tene Kan Dogon",
"dtm": "Tomo Kan Dogon",
"dtn": "Daatsʼíin",
"dto": "Tommo So Dogon",
"dtp": "Kadazan Dusun; Central Dusun",
"dtr": "Lotud",
"dts": "Toro So Dogon",
"dtt": "Toro Tegu Dogon",
"dtu": "Tebul Ure Dogon",
"dty": "Dotyali",
"dua": "Duala",
"dub": "Dubli",
"duc": "Duna",
"due": "Umiray Dumaget Agta",
"duf": "Dumbea; Drubea",
"dug": "Duruma; Chiduruma",
"duh": "Dungra Bhil",
"dui": "Dumun",
"duk": "Uyajitaya",
"dul": "Alabat Island Agta",
"dum": "Middle Dutch (ca. 1050-1350)",
"dun": "Dusun Deyah",
"duo": "Dupaninan Agta",
"dup": "Duano",
"duq": "Dusun Malang",
"dur": "Dii",
"dus": "Dumi",
"duu": "Drung",
"duv": "Duvle",
"duw": "Dusun Witu",
"dux": "Duungooma",
"duy": "Dicamay Agta",
"duz": "Duli-Gey",
"dv": "Dhivehi; Divehi; Maldivian",
"dva": "Duau",
"dwa": "Diri",
"dwk": "Dawik Kui",
"dwr": "Dawro",
"dws": "Dutton World Speedwords",
"dwu": "Dhuwal",
"dww": "Dawawa",
"dwy": "Dhuwaya",
"dwz": "Dewas Rai",
"dya": "Dyan",
"dyb": "Dyaberdyaber",
"dyd": "Dyugun",
"dyg": "Villa Viciosa Agta",
"dyi": "Djimini Senoufo",
"dym": "Yanda Dom Dogon",
"dyn": "Dyangadi; Dhanggatti",
"dyo": "Jola-Fonyi",
"dyu": "Dyula",
"dyy": "Djabugay; Dyaabugay",
"dz": "Dzongkha",
"dza": "Tunzu",
"dze": "Djiwarli",
"dzg": "Dazaga",
"dzl": "Dzalakha",
"dzn": "Dzando",
"eaa": "Karenggapa",
"ebc": "Beginci",
"ebg": "Ebughu",
"ebk": "Eastern Bontok",
"ebo": "Teke-Ebo",
"ebr": "Ebrié",
"ebu": "Embu; Kiembu",
"ecr": "Eteocretan",
"ecs": "Ecuadorian Sign Language",
"ecy": "Eteocypriot",
"ee": "Ewe",
"eee": "E",
"efa": "Efai",
"efe": "Efe",
"efi": "Efik",
"ega": "Ega",
"egl": "Emilian",
"egm": "Benamanga",
"ego": "Eggon",
"egx": "Egyptian languages",
"egy": "Egyptian (Ancient)",
"ehs": "Miyakubo Sign Language",
"ehu": "Ehueun",
"eip": "Eipomek",
"eit": "Eitiep",
"eiv": "Askopan",
"eja": "Ejamat",
"eka": "Ekajuk",
"eke": "Ekit",
"ekg": "Ekari",
"eki": "Eki",
"ekk": "Standard Estonian",
"ekl": "Kol (Bangladesh); Kol",
"ekm": "Elip",
"eko": "Koti",
"ekp": "Ekpeye",
"ekr": "Yace",
"eky": "Eastern Kayah",
"el": "Modern Greek (1453-)",
"ele": "Elepi",
"elh": "El Hugeirat",
"eli": "Nding",
"elk": "Elkei",
"elm": "Eleme",
"elo": "El Molo",
"elu": "Elu",
"elx": "Elamite",
"ema": "Emai-Iuleha-Ora",
"emb": "Embaloh",
"eme": "Emerillon",
"emg": "Eastern Meohang",
"emi": "Mussau-Emira",
"emk": "Eastern Maninkakan",
"emm": "Mamulique",
"emn": "Eman",
"emp": "Northern Emberá",
"emq": "Eastern Minyag",
"ems": "Pacific Gulf Yupik",
"emu": "Eastern Muria",
"emw": "Emplawas",
"emx": "Erromintxela",
"emy": "Epigraphic Mayan",
"emz": "Mbessa",
"en": "English",
"ena": "Apali",
"enb": "Markweeta",
"enc": "En",
"end": "Ende",
"enf": "Forest Enets",
"enh": "Tundra Enets",
"enl": "Enlhet",
"enm": "Middle English (1100-1500)",
"enn": "Engenni",
"eno": "Enggano",
"enq": "Enga",
"enr": "Emumu; Emem",
"enu": "Enu",
"env": "Enwan (Edo State)",
"enw": "Enwan (Akwa Ibom State)",
"enx": "Enxet",
"eo": "Esperanto",
"eot": "Beti (Côte d'Ivoire)",
"epi": "Epie",
"era": "Eravallan",
"erg": "Sie",
"erh": "Eruwa",
"eri": "Ogea",
"erk": "South Efate",
"ero": "Horpa",
"err": "Erre",
"ers": "Ersu",
"ert": "Eritai",
"erw": "Erokwanas",
"es": "Spanish; Castilian",
"ese": "Ese Ejja",
"esg": "Aheri Gondi",
"esh": "Eshtehardi",
"esi": "North Alaskan Inupiatun",
"esk": "Northwest Alaska Inupiatun",
"esl": "Egypt Sign Language",
"esm": "Esuma",
"esn": "Salvadoran Sign Language",
"eso": "Estonian Sign Language",
"esq": "Esselen",
"ess": "Central Siberian Yupik",
"esu": "Central Yupik",
"esx": "Eskimo-Aleut languages",
"esy": "Eskayan",
"et": "Estonian",
"etb": "Etebi",
"etc": "Etchemin",
"eth": "Ethiopian Sign Language",
"etn": "Eton (Vanuatu)",
"eto": "Eton (Cameroon)",
"etr": "Edolo",
"ets": "Yekhee",
"ett": "Etruscan",
"etu": "Ejagham",
"etx": "Eten",
"etz": "Semimi",
"eu": "Basque",
"euq": "Basque (family)",
"eve": "Even",
"evh": "Uvbie",
"evn": "Evenki",
"ewo": "Ewondo",
"ext": "Extremaduran",
"eya": "Eyak",
"eyo": "Keiyo",
"eza": "Ezaa",
"eze": "Uzekwe",
"fa": "Persian",
"faa": "Fasu",
"fab": "Fa d'Ambu",
"fad": "Wagi",
"faf": "Fagani",
"fag": "Finongan",
"fah": "Baissa Fali",
"fai": "Faiwol",
"faj": "Faita",
"fak": "Fang (Cameroon)",
"fal": "South Fali",
"fam": "Fam",
"fan": "Fang (Equatorial Guinea)",
"fap": "Paloor",
"far": "Fataleka",
"fat": "Fanti",
"fau": "Fayu",
"fax": "Fala",
"fay": "Southwestern Fars",
"faz": "Northwestern Fars",
"fbl": "West Albay Bikol",
"fcs": "Quebec Sign Language",
"fer": "Feroge",
"ff": "Fulah",
"ffi": "Foia Foia",
"ffm": "Maasina Fulfulde",
"fgr": "Fongoro",
"fi": "Finnish",
"fia": "Nobiin",
"fie": "Fyer",
"fif": "Faifi",
"fil": "Filipino; Pilipino",
"fip": "Fipa",
"fir": "Firan",
"fit": "Tornedalen Finnish; Meänkieli",
"fiu": "Finno-Ugrian languages",
"fiw": "Fiwaga",
"fj": "Fijian",
"fkk": "Kirya-Konzəl",
"fkv": "Kven Finnish",
"fla": "Kalispel-Pend d'Oreille",
"flh": "Foau",
"fli": "Fali",
"fll": "North Fali",
"fln": "Flinders Island",
"flr": "Fuliiru",
"fly": "Flaaitaal; Tsotsitaal",
"fmp": "Fe'fe'",
"fmu": "Far Western Muria",
"fnb": "Fanbak",
"fng": "Fanagalo",
"fni": "Fania",
"fo": "Faroese",
"fod": "Foodo",
"foi": "Foi",
"fom": "Foma",
"fon": "Fon",
"for": "Fore",
"fos": "Siraya",
"fox": "Formosan languages",
"fpe": "Fernando Po Creole English",
"fqs": "Fas",
"fr": "French",
"frc": "Cajun French",
"frd": "Fordata",
"frk": "Frankish",
"frm": "Middle French (ca. 1400-1600)",
"fro": "Old French (842-ca. 1400)",
"frp": "Arpitan; Francoprovençal",
"frq": "Forak",
"frr": "Northern Frisian",
"frs": "Eastern Frisian",
"frt": "Fortsenal",
"fse": "Finnish Sign Language",
"fsl": "French Sign Language",
"fss": "Finland-Swedish Sign Language; finlandssvenskt teckenspråk; suomenruotsalainen viittomakieli",
"fub": "Adamawa Fulfulde",
"fuc": "Pulaar",
"fud": "East Futuna",
"fue": "Borgu Fulfulde",
"fuf": "Pular",
"fuh": "Western Niger Fulfulde",
"fui": "Bagirmi Fulfulde",
"fuj": "Ko",
"fum": "Fum",
"fun": "Fulniô",
"fuq": "Central-Eastern Niger Fulfulde",
"fur": "Friulian",
"fut": "Futuna-Aniwa",
"fuu": "Furu",
"fuv": "Nigerian Fulfulde",
"fuy": "Fuyug",
"fvr": "Fur",
"fwa": "Fwâi",
"fwe": "Fwe",
"fy": "Western Frisian",
"ga": "Irish",
"gaa": "Ga",
"gab": "Gabri",
"gac": "Mixed Great Andamanese",
"gad": "Gaddang",
"gae": "Guarequena",
"gaf": "Gende",
"gag": "Gagauz",
"gah": "Alekano",
"gai": "Borei",
"gaj": "Gadsup",
"gak": "Gamkonora",
"gal": "Galolen",
"gam": "Kandawo",
"gan": "Gan Chinese",
"gao": "Gants",
"gap": "Gal",
"gaq": "Gata'",
"gar": "Galeya",
"gas": "Adiwasi Garasia",
"gat": "Kenati",
"gau": "Mudhili Gadaba",
"gaw": "Nobonob",
"gax": "Borana-Arsi-Guji Oromo",
"gay": "Gayo",
"gaz": "West Central Oromo",
"gba": "Gbaya (Central African Republic)",
"gbb": "Kaytetye",
"gbd": "Karajarri",
"gbe": "Niksek",
"gbf": "Gaikundi",
"gbg": "Gbanziri",
"gbh": "Defi Gbe",
"gbi": "Galela",
"gbj": "Bodo Gadaba",
"gbk": "Gaddi",
"gbl": "Gamit",
"gbm": "Garhwali",
"gbn": "Mo'da",
"gbo": "Northern Grebo",
"gbp": "Gbaya-Bossangoa",
"gbq": "Gbaya-Bozoum",
"gbr": "Gbagyi",
"gbs": "Gbesi Gbe",
"gbu": "Gagadu",
"gbv": "Gbanu",
"gbw": "Gabi-Gabi",
"gbx": "Eastern Xwla Gbe",
"gby": "Gbari",
"gbz": "Zoroastrian Dari",
"gcc": "Mali",
"gcd": "Ganggalida",
"gce": "Galice",
"gcf": "Guadeloupean Creole French",
"gcl": "Grenadian Creole English",
"gcn": "Gaina",
"gcr": "Guianese Creole French",
"gct": "Colonia Tovar German",
"gd": "Scottish Gaelic; Gaelic",
"gda": "Gade Lohar",
"gdb": "Pottangi Ollar Gadaba",
"gdc": "Gugu Badhun",
"gdd": "Gedaged",
"gde": "Gude",
"gdf": "Guduf-Gava",
"gdg": "Ga'dang",
"gdh": "Gadjerawang; Gajirrabeng",
"gdi": "Gundi",
"gdj": "Gurdjar",
"gdk": "Gadang",
"gdl": "Dirasha",
"gdm": "Laal",
"gdn": "Umanakaina",
"gdo": "Ghodoberi",
"gdq": "Mehri",
"gdr": "Wipi",
"gds": "Ghandruk Sign Language",
"gdt": "Kungardutyi",
"gdu": "Gudu",
"gdx": "Godwari",
"gea": "Geruma",
"geb": "Kire",
"gec": "Gboloo Grebo",
"ged": "Gade",
"gef": "Gerai",
"geg": "Gengle",
"geh": "Hutterite German; Hutterisch",
"gei": "Gebe",
"gej": "Gen",
"gek": "Ywom",
"gel": "ut-Ma'in",
"gem": "Germanic languages",
"geq": "Geme",
"ges": "Geser-Gorom",
"gev": "Eviya",
"gew": "Gera",
"gex": "Garre",
"gey": "Enya",
"gez": "Geez",
"gfk": "Patpatar",
"gft": "Gafat",
"gga": "Gao",
"ggb": "Gbii",
"ggd": "Gugadj",
"gge": "Gurr-goni",
"ggg": "Gurgula",
"ggk": "Kungarakany",
"ggl": "Ganglau",
"ggt": "Gitua",
"ggu": "Gagu; Gban",
"ggw": "Gogodala",
"gha": "Ghadamès",
"ghc": "Hiberno-Scottish Gaelic",
"ghe": "Southern Ghale",
"ghh": "Northern Ghale",
"ghk": "Geko Karen",
"ghl": "Ghulfan",
"ghn": "Ghanongga",
"gho": "Ghomara",
"ghr": "Ghera",
"ghs": "Guhu-Samane",
"ght": "Kuke; Kutang Ghale",
"gia": "Kija",
"gib": "Gibanawa",
"gic": "Gail",
"gid": "Gidar",
"gie": "Gaɓogbo; Guébie",
"gig": "Goaria",
"gih": "Githabul",
"gii": "Girirra",
"gil": "Gilbertese",
"gim": "Gimi (Eastern Highlands)",
"gin": "Hinukh",
"gip": "Gimi (West New Britain)",
"giq": "Green Gelao",
"gir": "Red Gelao",
"gis": "North Giziga",
"git": "Gitxsan",
"giu": "Mulao",
"giw": "White Gelao",
"gix": "Gilima",
"giy": "Giyug",
"giz": "South Giziga",
"gjk": "Kachi Koli",
"gjm": "Gunditjmara",
"gjn": "Gonja",
"gjr": "Gurindji Kriol",
"gju": "Gujari",
"gka": "Guya",
"gkd": "Magɨ (Madang Province)",
"gke": "Ndai",
"gkn": "Gokana",
"gko": "Kok-Nar",
"gkp": "Guinea Kpelle",
"gku": "ǂUngkue",
"gl": "Galician",
"glb": "Belning",
"glc": "Bon Gula",
"gld": "Nanai",
"glh": "Northwest Pashai; Northwest Pashayi",
"glj": "Gula Iro",
"glk": "Gilaki",
"gll": "Garlali",
"glo": "Galambu",
"glr": "Glaro-Twabo",
"glu": "Gula (Chad)",
"glw": "Glavda",
"gly": "Gule",
"gma": "Gambera",
"gmb": "Gula'alaa",
"gmd": "Mághdì",
"gme": "East Germanic languages",
"gmg": "Magɨyi",
"gmh": "Middle High German (ca. 1050-1500)",
"gml": "Middle Low German",
"gmm": "Gbaya-Mbodomo",
"gmn": "Gimnime",
"gmq": "North Germanic languages",
"gmr": "Mirning; Mirniny",
"gmu": "Gumalu",
"gmv": "Gamo",
"gmw": "West Germanic languages",
"gmx": "Magoma",
"gmy": "Mycenaean Greek",
"gmz": "Mgbolizhia",
"gn": "Guarani",
"gna": "Kaansa",
"gnb": "Gangte",
"gnc": "Guanche",
"gnd": "Zulgo-Gemzek",
"gne": "Ganang",
"gng": "Ngangam",
"gnh": "Lere",
"gni": "Gooniyandi",
"gnj": "Ngen",
"gnk": "ǁGana",
"gnl": "Gangulu",
"gnm": "Ginuman",
"gnn": "Gumatj",
"gno": "Northern Gondi",
"gnq": "Gana",
"gnr": "Gureng Gureng",
"gnt": "Guntai",
"gnu": "Gnau",
"gnw": "Western Bolivian Guaraní",
"gnz": "Ganzi",
"goa": "Guro",
"gob": "Playero",
"goc": "Gorakor",
"god": "Godié",
"goe": "Gongduk",
"gof": "Gofa",
"gog": "Gogo",
"goh": "Old High German (ca. 750-1050)",
"goi": "Gobasi",
"goj": "Gowlan",
"gok": "Gowli",
"gol": "Gola",
"gom": "Goan Konkani",
"gon": "Gondi",
"goo": "Gone Dau",
"gop": "Yeretuar",
"goq": "Gorap",
"gor": "Gorontalo",
"gos": "Gronings",
"got": "Gothic",
"gou": "Gavar",
"gov": "Goo",
"gow": "Gorowa",
"gox": "Gobu",
"goy": "Goundo",
"goz": "Gozarkhani",
"gpa": "Gupa-Abawa",
"gpe": "Ghanaian Pidgin English",
"gpn": "Taiap",
"gqa": "Ga'anda",
"gqi": "Guiqiong",
"gqn": "Guana (Brazil)",
"gqr": "Gor",
"gqu": "Qau",
"gra": "Rajput Garasia",
"grb": "Grebo",
"grc": "Ancient Greek (to 1453)",
"grd": "Guruntum-Mbaaru",
"grg": "Madi",
"grh": "Gbiri-Niragu",
"gri": "Ghari",
"grj": "Southern Grebo",
"grk": "Greek languages",
"grm": "Kota Marudu Talantang",
"gro": "Groma",
"grq": "Gorovu",
"grr": "Taznatit",
"grs": "Gresi",
"grt": "Garo",
"gru": "Kistane",
"grv": "Central Grebo",
"grw": "Gweda",
"grx": "Guriaso",
"gry": "Barclayville Grebo",
"grz": "Guramalum",
"gse": "Ghanaian Sign Language",
"gsg": "German Sign Language",
"gsl": "Gusilay",
"gsm": "Guatemalan Sign Language",
"gsn": "Nema; Gusan",
"gso": "Southwest Gbaya",
"gsp": "Wasembo",
"gss": "Greek Sign Language",
"gsw": "Swiss German; Alemannic; Alsatian",
"gta": "Guató",
"gtu": "Aghu-Tharnggala",
"gu": "Gujarati",
"gua": "Shiki",
"gub": "Guajajára",
"guc": "Wayuu",
"gud": "Yocoboué Dida",
"gue": "Gurindji",
"guf": "Gupapuyngu",
"gug": "Paraguayan Guaraní",
"guh": "Guahibo",
"gui": "Eastern Bolivian Guaraní",
"guk": "Gumuz",
"gul": "Sea Island Creole English",
"gum": "Guambiano",
"gun": "Mbyá Guaraní",
"guo": "Guayabero",
"gup": "Gunwinggu",
"guq": "Aché",
"gur": "Farefare",
"gus": "Guinean Sign Language",
"gut": "Maléku Jaíka",
"guu": "Yanomamö",
"guw": "Gun",
"gux": "Gourmanchéma",
"guz": "Gusii; Ekegusii",
"gv": "Manx",
"gva": "Guana (Paraguay)",
"gvc": "Guanano",
"gve": "Duwet",
"gvf": "Golin",
"gvj": "Guajá",
"gvl": "Gulay",
"gvm": "Gurmana",
"gvn": "Kuku-Yalanji",
"gvo": "Gavião Do Jiparaná",
"gvp": "Pará Gavião",
"gvr": "Gurung",
"gvs": "Gumawana",
"gvy": "Guyani",
"gwa": "Mbato",
"gwb": "Gwa",
"gwc": "Gawri; Kalami",
"gwd": "Gawwada",
"gwe": "Gweno",
"gwf": "Gowro",
"gwg": "Moo",
"gwi": "Gwichʼin",
"gwj": "ǀGwi",
"gwm": "Awngthim",
"gwn": "Gwandara",
"gwr": "Gwere",
"gwt": "Gawar-Bati",
"gwu": "Guwamu",
"gww": "Kwini",
"gwx": "Gua",
"gxx": "Wè Southern",
"gya": "Northwest Gbaya",
"gyb": "Garus",
"gyd": "Kayardild",
"gye": "Gyem",
"gyf": "Gungabula",
"gyg": "Gbayi",
"gyi": "Gyele",
"gyl": "Gayil",
"gym": "Ngäbere",
"gyn": "Guyanese Creole English",
"gyo": "Gyalsumdo",
"gyr": "Guarayu",
"gyy": "Gunya",
"gyz": "Geji; Gyaazi",
"gza": "Ganza",
"gzi": "Gazi",
"gzn": "Gane",
"ha": "Hausa",
"haa": "Han",
"hab": "Hanoi Sign Language",
"hac": "Gurani",
"had": "Hatam",
"hae": "Eastern Oromo",
"haf": "Haiphong Sign Language",
"hag": "Hanga",
"hah": "Hahon",
"hai": "Haida",
"haj": "Hajong",
"hak": "Hakka Chinese",
"hal": "Halang",
"ham": "Hewa",
"han": "Hangaza",
"hao": "Hakö",
"hap": "Hupla",
"haq": "Ha",
"har": "Harari",
"has": "Haisla",
"hav": "Havu",
"haw": "Hawaiian",
"hax": "Southern Haida",
"hay": "Haya",
"haz": "Hazaragi",
"hba": "Hamba",
"hbb": "Huba",
"hbn": "Heiban",
"hbo": "Ancient Hebrew",
"hbu": "Habu",
"hca": "Andaman Creole Hindi",
"hch": "Huichol",
"hdn": "Northern Haida",
"hds": "Honduras Sign Language",
"hdy": "Hadiyya",
"he": "Hebrew",
"hea": "Northern Qiandong Miao",
"hed": "Herdé",
"heg": "Helong",
"heh": "Hehe",
"hei": "Heiltsuk",
"hem": "Hemba",
"hgm": "Haiǁom",
"hgw": "Haigwai",
"hhi": "Hoia Hoia",
"hhr": "Kerak",
"hhy": "Hoyahoya",
"hi": "Hindi",
"hia": "Lamang",
"hib": "Hibito",
"hid": "Hidatsa",
"hif": "Fiji Hindi",
"hig": "Kamwe",
"hih": "Pamosu",
"hii": "Hinduri",
"hij": "Hijuk",
"hik": "Seit-Kaitetu",
"hil": "Hiligaynon",
"him": "Himachali languages; Western Pahari languages",
"hio": "Tsoa",
"hir": "Himarimã",
"hit": "Hittite",
"hiw": "Hiw",
"hix": "Hixkaryána",
"hji": "Haji",
"hka": "Kahe",
"hke": "Hunde",
"hkh": "Khah; Poguli",
"hkk": "Hunjara-Kaina Ke",
"hkn": "Mel-Khaonh",
"hks": "Hong Kong Sign Language; Heung Kong Sau Yue",
"hla": "Halia",
"hlb": "Halbi",
"hld": "Halang Doan",
"hle": "Hlersu",
"hlt": "Matu Chin",
"hlu": "Hieroglyphic Luwian",
"hma": "Southern Mashan Hmong; Southern Mashan Miao",
"hmb": "Humburi Senni Songhay",
"hmc": "Central Huishui Hmong; Central Huishui Miao",
"hmd": "Large Flowery Miao; A-hmaos; Da-Hua Miao",
"hme": "Eastern Huishui Hmong; Eastern Huishui Miao",
"hmf": "Hmong Don",
"hmg": "Southwestern Guiyang Hmong",
"hmh": "Southwestern Huishui Hmong; Southwestern Huishui Miao",
"hmi": "Northern Huishui Hmong; Northern Huishui Miao",
"hmj": "Ge; Gejia",
"hmk": "Maek",
"hml": "Luopohe Hmong; Luopohe Miao",
"hmm": "Central Mashan Hmong; Central Mashan Miao",
"hmn": "Hmong; Mong",
"hmp": "Northern Mashan Hmong; Northern Mashan Miao",
"hmq": "Eastern Qiandong Miao",
"hmr": "Hmar",
"hms": "Southern Qiandong Miao",
"hmt": "Hamtai",
"hmu": "Hamap",
"hmv": "Hmong Dô",
"hmw": "Western Mashan Hmong; Western Mashan Miao",
"hmx": "Hmong-Mien languages",
"hmy": "Southern Guiyang Hmong; Southern Guiyang Miao",
"hmz": "Hmong Shua; Sinicized Miao",
"hna": "Mina (Cameroon)",
"hnd": "Southern Hindko",
"hne": "Chhattisgarhi",
"hng": "Hungu",
"hnh": "ǁAni",
"hni": "Hani",
"hnj": "Hmong Njua; Mong Leng; Mong Njua",
"hnn": "Hanunoo",
"hno": "Northern Hindko",
"hns": "Caribbean Hindustani",
"hnu": "Hung",
"ho": "Hiri Motu",
"hoa": "Hoava",
"hob": "Mari (Madang Province)",
"hoc": "Ho",
"hod": "Holma",
"hoe": "Horom",
"hoh": "Hobyót",
"hoi": "Holikachuk",
"hoj": "Hadothi; Haroti",
"hok": "Hokan languages",
"hol": "Holu",
"hom": "Homa",
"hoo": "Holoholo",
"hop": "Hopi",
"hor": "Horo",
"hos": "Ho Chi Minh City Sign Language",
"hot": "Hote; Malê",
"hov": "Hovongan",
"how": "Honi",
"hoy": "Holiya",
"hoz": "Hozo",
"hpo": "Hpon",
"hps": "Hawai'i Sign Language (HSL); Hawai'i Pidgin Sign Language",
"hr": "Croatian",
"hra": "Hrangkhol",
"hrc": "Niwer Mil",
"hre": "Hre",
"hrk": "Haruku",
"hrm": "Horned Miao",
"hro": "Haroi",
"hrp": "Nhirrpi",
"hrt": "Hértevin",
"hru": "Hruso",
"hrw": "Warwar Feni",
"hrx": "Hunsrik",
"hrz": "Harzani",
"hsb": "Upper Sorbian",
"hsh": "Hungarian Sign Language",
"hsl": "Hausa Sign Language",
"hsn": "Xiang Chinese",
"hss": "Harsusi",
"ht": "Haitian; Haitian Creole",
"hti": "Hoti",
"hto": "Minica Huitoto",
"hts": "Hadza",
"htu": "Hitu",
"htx": "Middle Hittite",
"hu": "Hungarian",
"hub": "Huambisa",
"huc": "ǂHua; ǂʼAmkhoe",
"hud": "Huaulu",
"hue": "San Francisco Del Mar Huave",
"huf": "Humene",
"hug": "Huachipaeri",
"huh": "Huilliche",
"hui": "Huli",
"huj": "Northern Guiyang Hmong; Northern Guiyang Miao",
"huk": "Hulung",
"hul": "Hula",
"hum": "Hungana",
"huo": "Hu",
"hup": "Hupa",
"huq": "Tsat",
"hur": "Halkomelem",
"hus": "Huastec",
"hut": "Humla",
"huu": "Murui Huitoto",
"huv": "San Mateo Del Mar Huave",
"huw": "Hukumina",
"hux": "Nüpode Huitoto",
"huy": "Hulaulá",
"huz": "Hunzib",
"hvc": "Haitian Vodoun Culture Language",
"hve": "San Dionisio Del Mar Huave",
"hvk": "Haveke",
"hvn": "Sabu",
"hvv": "Santa María Del Mar Huave",
"hwa": "Wané",
"hwc": "Hawai'i Creole English; Hawai'i Pidgin",
"hwo": "Hwana",
"hy": "Armenian",
"hya": "Hya",
"hyw": "Western Armenian",
"hyx": "Armenian (family)",
"hz": "Herero",
"ia": "Interlingua (International Auxiliary Language Association)",
"iai": "Iaai",
"ian": "Iatmul",
"iar": "Purari",
"iba": "Iban",
"ibb": "Ibibio",
"ibd": "Iwaidja",
"ibe": "Akpes",
"ibg": "Ibanag",
"ibh": "Bih",
"ibl": "Ibaloi",
"ibm": "Agoi",
"ibn": "Ibino",
"ibr": "Ibuoro",
"ibu": "Ibu",
"iby": "Ibani",
"ica": "Ede Ica",
"ich": "Etkywan",
"icl": "Icelandic Sign Language",
"icr": "Islander Creole English",
"id": "Indonesian",
"ida": "Idakho-Isukha-Tiriki; Luidakho-Luisukha-Lutirichi",
"idb": "Indo-Portuguese",
"idc": "Idon; Ajiya",
"idd": "Ede Idaca",
"ide": "Idere",
"idi": "Idi",
"idr": "Indri",
"ids": "Idesa",
"idt": "Idaté",
"idu": "Idoma",
"ie": "Interlingue; Occidental",
"ifa": "Amganad Ifugao",
"ifb": "Batad Ifugao; Ayangan Ifugao",
"ife": "Ifè",
"iff": "Ifo",
"ifk": "Tuwali Ifugao",
"ifm": "Teke-Fuumu",
"ifu": "Mayoyao Ifugao",
"ify": "Keley-I Kallahan",
"ig": "Igbo",
"igb": "Ebira",
"ige": "Igede",
"igg": "Igana",
"igl": "Igala",
"igm": "Kanggape",
"ign": "Ignaciano",
"igo": "Isebe",
"igs": "Interglossa",
"igw": "Igwe",
"ihb": "Iha Based Pidgin",
"ihi": "Ihievbe",
"ihp": "Iha",
"ihw": "Bidhawal",
"ii": "Sichuan Yi; Nuosu",
"iin": "Thiin",
"iir": "Indo-Iranian languages",
"ijc": "Izon",
"ije": "Biseni",
"ijj": "Ede Ije",
"ijn": "Kalabari",
"ijo": "Ijo languages",
"ijs": "Southeast Ijo",
"ik": "Inupiaq",
"ike": "Eastern Canadian Inuktitut",
"iki": "Iko",
"ikk": "Ika",
"ikl": "Ikulu",
"iko": "Olulumo-Ikom",
"ikp": "Ikpeshi",
"ikr": "Ikaranggal",
"iks": "Inuit Sign Language",
"ikt": "Inuinnaqtun; Western Canadian Inuktitut",
"ikv": "Iku-Gora-Ankwa",
"ikw": "Ikwere",
"ikx": "Ik",
"ikz": "Ikizu",
"ila": "Ile Ape",
"ilb": "Ila",
"ilg": "Garig-Ilgar",
"ili": "Ili Turki",
"ilk": "Ilongot",
"ilm": "Iranun (Malaysia)",
"ilo": "Iloko",
"ilp": "Iranun (Philippines)",
"ils": "International Sign",
"ilu": "Ili'uun",
"ilv": "Ilue",
"ima": "Mala Malasar",
"imi": "Anamgura",
"iml": "Miluk",
"imn": "Imonda",
"imo": "Imbongu",
"imr": "Imroing",
"ims": "Marsian",
"imt": "Imotong",
"imy": "Milyan",
"inb": "Inga",
"inc": "Indic languages",
"ine": "Indo-European languages",
"ing": "Degexit'an",
"inh": "Ingush",
"inj": "Jungle Inga",
"inl": "Indonesian Sign Language",
"inm": "Minaean",
"inn": "Isinai",
"ino": "Inoke-Yate",
"inp": "Iñapari",
"ins": "Indian Sign Language",
"int": "Intha",
"inz": "Ineseño",
"io": "Ido",
"ior": "Inor",
"iou": "Tuma-Irumu",
"iow": "Iowa-Oto",
"ipi": "Ipili",
"ipo": "Ipiko",
"iqu": "Iquito",
"iqw": "Ikwo",
"ira": "Iranian languages",
"ire": "Iresim",
"irh": "Irarutu",
"iri": "Rigwe; Irigwe",
"irk": "Iraqw",
"irn": "Irántxe",
"iro": "Iroquoian languages",
"irr": "Ir",
"iru": "Irula",
"irx": "Kamberau",
"iry": "Iraya",
"is": "Icelandic",
"isa": "Isabi",
"isc": "Isconahua",
"isd": "Isnag",
"ise": "Italian Sign Language",
"isg": "Irish Sign Language",
"ish": "Esan",
"isi": "Nkem-Nkum",
"isk": "Ishkashimi",
"ism": "Masimasi",
"isn": "Isanzu",
"iso": "Isoko",
"isr": "Israeli Sign Language",
"ist": "Istriot",
"isu": "Isu (Menchum Division)",
"it": "Italian",
"itb": "Binongan Itneg",
"itc": "Italic languages",
"itd": "Southern Tidung",
"ite": "Itene",
"iti": "Inlaod Itneg",
"itk": "Judeo-Italian",
"itl": "Itelmen",
"itm": "Itu Mbon Uzo",
"ito": "Itonama",
"itr": "Iteri",
"its": "Isekiri",
"itt": "Maeng Itneg",
"itv": "Itawit",
"itw": "Ito",
"itx": "Itik",
"ity": "Moyadan Itneg",
"itz": "Itzá",
"iu": "Inuktitut",
"ium": "Iu Mien",
"ivb": "Ibatan",
"ivv": "Ivatan",
"iwk": "I-Wak",
"iwm": "Iwam",
"iwo": "Iwur",
"iws": "Sepik Iwam",
"ixc": "Ixcatec",
"ixl": "Ixil",
"iya": "Iyayu",
"iyo": "Mesaka",
"iyx": "Yaka (Congo)",
"izh": "Ingrian",
"izr": "Izere",
"izz": "Izii",
"ja": "Japanese",
"jaa": "Jamamadí",
"jab": "Hyam",
"jac": "Popti'; Jakalteko",
"jad": "Jahanka",
"jae": "Yabem",
"jaf": "Jara",
"jah": "Jah Hut",
"jaj": "Zazao",
"jak": "Jakun",
"jal": "Yalahatan",
"jam": "Jamaican Creole English",
"jan": "Jandai",
"jao": "Yanyuwa",
"jaq": "Yaqay",
"jas": "New Caledonian Javanese",
"jat": "Jakati",
"jau": "Yaur",
"jax": "Jambi Malay",
"jay": "Yan-nhangu; Nhangu",
"jaz": "Jawe",
"jbe": "Judeo-Berber",
"jbi": "Badjiri",
"jbj": "Arandai",
"jbk": "Barikewa",
"jbm": "Bijim",
"jbn": "Nafusi",
"jbo": "Lojban",
"jbr": "Jofotek-Bromnya",
"jbt": "Jabutí",
"jbu": "Jukun Takum",
"jbw": "Yawijibaya",
"jcs": "Jamaican Country Sign Language",
"jct": "Krymchak",
"jda": "Jad",
"jdg": "Jadgali",
"jdt": "Judeo-Tat",
"jeb": "Jebero",
"jee": "Jerung",
"jeh": "Jeh",
"jei": "Yei",
"jek": "Jeri Kuo",
"jel": "Yelmek",
"jen": "Dza",
"jer": "Jere",
"jet": "Manem",
"jeu": "Jonkor Bourmataguil",
"jgb": "Ngbee",
"jge": "Judeo-Georgian",
"jgk": "Gwak",
"jgo": "Ngomba",
"jhi": "Jehai",
"jhs": "Jhankot Sign Language",
"jia": "Jina",
"jib": "Jibu",
"jic": "Tol",
"jid": "Bu (Kaduna State)",
"jie": "Jilbe",
"jig": "Jingulu; Djingili",
"jih": "sTodsde; Shangzhai",
"jii": "Jiiddu",
"jil": "Jilim",
"jim": "Jimi (Cameroon)",
"jio": "Jiamao",
"jiq": "Guanyinqiao; Lavrung",
"jit": "Jita",
"jiu": "Youle Jinuo",
"jiv": "Shuar",
"jiy": "Buyuan Jinuo",
"jje": "Jejueo",
"jjr": "Bankal",
"jka": "Kaera",
"jkm": "Mobwa Karen",
"jko": "Kubo",
"jkp": "Paku Karen",
"jkr": "Koro (India)",
"jks": "Amami Koniya Sign Language",
"jku": "Labir",
"jle": "Ngile",
"jls": "Jamaican Sign Language",
"jma": "Dima",
"jmb": "Zumbun",
"jmc": "Machame",
"jmd": "Yamdena",
"jmi": "Jimi (Nigeria)",
"jml": "Jumli",
"jmn": "Makuri Naga",
"jmr": "Kamara",
"jms": "Mashi (Nigeria)",
"jmw": "Mouwase",
"jmx": "Western Juxtlahuaca Mixtec",
"jna": "Jangshung",
"jnd": "Jandavra",
"jng": "Yangman",
"jni": "Janji",
"jnj": "Yemsa",
"jnl": "Rawat",
"jns": "Jaunsari",
"job": "Joba",
"jod": "Wojenaka",
"jog": "Jogi",
"jor": "Jorá",
"jos": "Jordanian Sign Language",
"jow": "Jowulu",
"jpa": "Jewish Palestinian Aramaic",
"jpr": "Judeo-Persian",
"jpx": "Japanese (family)",
"jqr": "Jaqaru",
"jra": "Jarai",
"jrb": "Judeo-Arabic",
"jrr": "Jiru",
"jrt": "Jakattoe",
"jru": "Japrería",
"jsl": "Japanese Sign Language",
"jua": "Júma",
"jub": "Wannu",
"juc": "Jurchen",
"jud": "Worodougou",
"juh": "Hõne",
"jui": "Ngadjuri",
"juk": "Wapan",
"jul": "Jirel",
"jum": "Jumjum",
"jun": "Juang",
"juo": "Jiba",
"jup": "Hupdë",
"jur": "Jurúna",
"jus": "Jumla Sign Language",
"jut": "Jutish",
"juu": "Ju",
"juw": "Wãpha",
"juy": "Juray",
"jv": "Javanese",
"jvd": "Javindo",
"jvn": "Caribbean Javanese",
"jwi": "Jwira-Pepesa",
"jya": "Jiarong",
"jye": "Judeo-Yemeni Arabic",
"jyy": "Jaya",
"ka": "Georgian",
"kaa": "Kara-Kalpak; Karakalpak",
"kab": "Kabyle",
"kac": "Kachin; Jingpho",
"kad": "Adara",
"kae": "Ketangalan",
"kaf": "Katso",
"kag": "Kajaman",
"kah": "Kara (Central African Republic)",
"kai": "Karekare",
"kaj": "Jju",
"kak": "Kalanguya; Kayapa Kallahan",
"kam": "Kamba (Kenya)",
"kao": "Xaasongaxango",
"kap": "Bezhta",
"kaq": "Capanahua",
"kar": "Karen languages",
"kav": "Katukína",
"kaw": "Kawi",
"kax": "Kao",
"kay": "Kamayurá",
"kba": "Kalarko",
"kbb": "Kaxuiâna",
"kbc": "Kadiwéu",
"kbd": "Kabardian",
"kbe": "Kanju",
"kbg": "Khamba",
"kbh": "Camsá",
"kbi": "Kaptiau",
"kbj": "Kari",
"kbk": "Grass Koiari",
"kbl": "Kanembu",
"kbm": "Iwal",
"kbn": "Kare (Central African Republic)",
"kbo": "Keliko",
"kbp": "Kabiyè",
"kbq": "Kamano",
"kbr": "Kafa",
"kbs": "Kande",
"kbt": "Abadi",
"kbu": "Kabutra",
"kbv": "Dera (Indonesia)",
"kbw": "Kaiep",
"kbx": "Ap Ma",
"kby": "Manga Kanuri",
"kbz": "Duhwa",
"kca": "Khanty",
"kcb": "Kawacha",
"kcc": "Lubila",
"kcd": "Ngkâlmpw Kanum",
"kce": "Kaivi",
"kcf": "Ukaan",
"kcg": "Tyap",
"kch": "Vono",
"kci": "Kamantan",
"kcj": "Kobiana",
"kck": "Kalanga",
"kcl": "Kela (Papua New Guinea); Kala",
"kcm": "Gula (Central African Republic)",
"kcn": "Nubi",
"kco": "Kinalakna",
"kcp": "Kanga",
"kcq": "Kamo",
"kcr": "Katla",
"kcs": "Koenoem",
"kct": "Kaian",
"kcu": "Kami (Tanzania)",
"kcv": "Kete",
"kcw": "Kabwari",
"kcx": "Kachama-Ganjule",
"kcy": "Korandje",
"kcz": "Konongo",
"kda": "Worimi",
"kdc": "Kutu",
"kdd": "Yankunytjatjara",
"kde": "Makonde",
"kdf": "Mamusi",
"kdg": "Seba",
"kdh": "Tem",
"kdi": "Kumam",
"kdj": "Karamojong",
"kdk": "Numèè; Kwényi",
"kdl": "Tsikimba",
"kdm": "Kagoma",
"kdn": "Kunda",
"kdo": "Kordofanian languages",
"kdp": "Kaningdon-Nindem",
"kdq": "Koch",
"kdr": "Karaim",
"kdt": "Kuy",
"kdu": "Kadaru",
"kdw": "Koneraw",
"kdx": "Kam",
"kdy": "Keder; Keijar",
"kdz": "Kwaja",
"kea": "Kabuverdianu",
"keb": "Kélé",
"kec": "Keiga",
"ked": "Kerewe",
"kee": "Eastern Keres",
"kef": "Kpessi",
"keg": "Tese",
"keh": "Keak",
"kei": "Kei",
"kej": "Kadar",
"kek": "Kekchí",
"kel": "Kela (Democratic Republic of Congo)",
"kem": "Kemak",
"ken": "Kenyang",
"keo": "Kakwa",
"kep": "Kaikadi",
"keq": "Kamar",
"ker": "Kera",
"kes": "Kugbo",
"ket": "Ket",
"keu": "Akebu",
"kev": "Kanikkaran",
"kew": "West Kewa",
"kex": "Kukna",
"key": "Kupia",
"kez": "Kukele",
"kfa": "Kodava",
"kfb": "Northwestern Kolami",
"kfc": "Konda-Dora",
"kfd": "Korra Koraga",
"kfe": "Kota (India)",
"kff": "Koya",
"kfg": "Kudiya",
"kfh": "Kurichiya",
"kfi": "Kannada Kurumba",
"kfj": "Kemiehua",
"kfk": "Kinnauri",
"kfl": "Kung",
"kfm": "Khunsari",
"kfn": "Kuk",
"kfo": "Koro (Côte d'Ivoire)",
"kfp": "Korwa",
"kfq": "Korku",
"kfr": "Kachhi; Kutchi",
"kfs": "Bilaspuri",
"kft": "Kanjari",
"kfu": "Katkari",
"kfv": "Kurmukar",
"kfw": "Kharam Naga",
"kfx": "Kullu Pahari",
"kfy": "Kumaoni",
"kfz": "Koromfé",
"kg": "Kongo",
"kga": "Koyaga",
"kgb": "Kawe",
"kge": "Komering",
"kgf": "Kube",
"kgg": "Kusunda",
"kgi": "Selangor Sign Language",
"kgj": "Gamale Kham",
"kgk": "Kaiwá",
"kgl": "Kunggari",
"kgm": "Karipúna",
"kgn": "Karingani",
"kgo": "Krongo",
"kgp": "Kaingang",
"kgq": "Kamoro",
"kgr": "Abun",
"kgs": "Kumbainggar",
"kgt": "Somyev",
"kgu": "Kobol",
"kgv": "Karas",
"kgw": "Karon Dori",
"kgx": "Kamaru",
"kgy": "Kyerung",
"kha": "Khasi",
"khb": "Lü",
"khc": "Tukang Besi North",
"khd": "Bädi Kanum",
"khe": "Korowai",
"khf": "Khuen",
"khg": "Khams Tibetan",
"khh": "Kehu",
"khi": "Khoisan languages",
"khj": "Kuturmi",
"khk": "Halh Mongolian",
"khl": "Lusi",
"khn": "Khandesi",
"kho": "Khotanese; Sakan",
"khp": "Kapori; Kapauri",
"khq": "Koyra Chiini Songhay",
"khr": "Kharia",
"khs": "Kasua",
"kht": "Khamti",
"khu": "Nkhumbi",
"khv": "Khvarshi",
"khw": "Khowar",
"khx": "Kanu",
"khy": "Kele (Democratic Republic of Congo)",
"khz": "Keapara",
"ki": "Kikuyu; Gikuyu",
"kia": "Kim",
"kib": "Koalib",
"kic": "Kickapoo",
"kid": "Koshin",
"kie": "Kibet",
"kif": "Eastern Parbate Kham",
"kig": "Kimaama; Kimaghima",
"kih": "Kilmeri",
"kii": "Kitsai",
"kij": "Kilivila",
"kil": "Kariya",
"kim": "Karagas",
"kio": "Kiowa",
"kip": "Sheshi Kham",
"kiq": "Kosadle; Kosare",
"kis": "Kis",
"kit": "Agob",
"kiu": "Kirmanjki (individual language)",
"kiv": "Kimbu",
"kiw": "Northeast Kiwai",
"kix": "Khiamniungan Naga",
"kiy": "Kirikiri",
"kiz": "Kisi",
"kj": "Kuanyama; Kwanyama",
"kja": "Mlap",
"kjb": "Q'anjob'al; Kanjobal",
"kjc": "Coastal Konjo",
"kjd": "Southern Kiwai",
"kje": "Kisar",
"kjg": "Khmu",
"kjh": "Khakas",
"kji": "Zabana",
"kjj": "Khinalugh",
"kjk": "Highland Konjo",
"kjl": "Western Parbate Kham",
"kjm": "Kháng",
"kjn": "Kunjen",
"kjo": "Harijan Kinnauri",
"kjp": "Pwo Eastern Karen",
"kjq": "Western Keres",
"kjr": "Kurudu",
"kjs": "East Kewa",
"kjt": "Phrae Pwo Karen",
"kju": "Kashaya",
"kjv": "Kaikavian Literary Language",
"kjx": "Ramopa",
"kjy": "Erave",
"kjz": "Bumthangkha",
"kk": "Kazakh",
"kka": "Kakanda",
"kkb": "Kwerisa",
"kkc": "Odoodee",
"kkd": "Kinuku",
"kke": "Kakabe",
"kkf": "Kalaktang Monpa",
"kkg": "Mabaka Valley Kalinga",
"kkh": "Khün",
"kki": "Kagulu",
"kkj": "Kako",
"kkk": "Kokota",
"kkl": "Kosarek Yale",
"kkm": "Kiong",
"kkn": "Kon Keu",
"kko": "Karko",
"kkp": "Gugubera; Koko-Bera",
"kkq": "Kaeku",
"kkr": "Kir-Balar",
"kks": "Giiwo",
"kkt": "Koi",
"kku": "Tumi",
"kkv": "Kangean",
"kkw": "Teke-Kukuya",
"kkx": "Kohin",
"kky": "Guugu Yimidhirr; Guguyimidjir",
"kkz": "Kaska",
"kl": "Kalaallisut; Greenlandic",
"kla": "Klamath-Modoc",
"klb": "Kiliwa",
"klc": "Kolbila",
"kld": "Gamilaraay",
"kle": "Kulung (Nepal)",
"klf": "Kendeje",
"klg": "Tagakaulo",
"klh": "Weliki",
"kli": "Kalumpang",
"klj": "Khalaj",
"klk": "Kono (Nigeria)",
"kll": "Kagan Kalagan",
"klm": "Migum",
"kln": "Kalenjin",
"klo": "Kapya",
"klp": "Kamasa",
"klq": "Rumu",
"klr": "Khaling",
"kls": "Kalasha",
"klt": "Nukna",
"klu": "Klao",
"klv": "Maskelynes",
"klw": "Tado; Lindu",
"klx": "Koluwawa",
"kly": "Kalao",
"klz": "Kabola",
"km": "Khmer; Central Khmer",
"kma": "Konni",
"kmb": "Kimbundu",
"kmc": "Southern Dong",
"kmd": "Majukayang Kalinga",
"kme": "Bakole",
"kmf": "Kare (Papua New Guinea)",
"kmg": "Kâte",
"kmh": "Kalam",
"kmi": "Kami (Nigeria)",
"kmj": "Kumarbhag Paharia",
"kmk": "Limos Kalinga",
"kml": "Tanudan Kalinga",
"kmm": "Kom (India)",
"kmn": "Awtuw",
"kmo": "Kwoma",
"kmp": "Gimme",
"kmq": "Kwama",
"kmr": "Northern Kurdish",
"kms": "Kamasau",
"kmt": "Kemtuik",
"kmu": "Kanite",
"kmv": "Karipúna Creole French",
"kmw": "Komo (Democratic Republic of Congo)",
"kmx": "Waboda",
"kmy": "Koma",
"kmz": "Khorasani Turkish",
"kn": "Kannada",
"kna": "Dera (Nigeria)",
"knb": "Lubuagan Kalinga",
"knc": "Central Kanuri",
"knd": "Konda",
"kne": "Kankanaey",
"knf": "Mankanya",
"kng": "Koongo",
"kni": "Kanufi",
"knj": "Western Kanjobal",
"knk": "Kuranko",
"knl": "Keninjal",
"knm": "Kanamarí",
"knn": "Konkani (individual language)",
"kno": "Kono (Sierra Leone)",
"knp": "Kwanja",
"knq": "Kintaq",
"knr": "Kaningra",
"kns": "Kensiu",
"knt": "Panoan Katukína",
"knu": "Kono (Guinea)",
"knv": "Tabo",
"knw": "Kung-Ekoka",
"knx": "Kendayan; Salako",
"kny": "Kanyok",
"knz": "Kalamsé",
"ko": "Korean",
"koa": "Konomala",
"koc": "Kpati",
"kod": "Kodi",
"koe": "Kacipo-Bale Suri",
"kof": "Kubi",
"kog": "Cogui; Kogi",
"koh": "Koyo",
"koi": "Komi-Permyak",
"kok": "Konkani (macrolanguage)",
"kol": "Kol (Papua New Guinea)",
"koo": "Konzo",
"kop": "Waube",
"koq": "Kota (Gabon)",
"kos": "Kosraean",
"kot": "Lagwan",
"kou": "Koke",
"kov": "Kudu-Camo",
"kow": "Kugama",
"koy": "Koyukon",
"koz": "Korak",
"kpa": "Kutto",
"kpb": "Mullu Kurumba",
"kpc": "Curripaco",
"kpd": "Koba",
"kpe": "Kpelle",
"kpf": "Komba",
"kpg": "Kapingamarangi",
"kph": "Kplang",
"kpi": "Kofei",
"kpj": "Karajá",
"kpk": "Kpan",
"kpl": "Kpala",
"kpm": "Koho",
"kpn": "Kepkiriwát",
"kpo": "Ikposo",
"kpq": "Korupun-Sela",
"kpr": "Korafe-Yegha",
"kps": "Tehit",
"kpt": "Karata",
"kpu": "Kafoa",
"kpv": "Komi-Zyrian",
"kpw": "Kobon",
"kpx": "Mountain Koiali",
"kpy": "Koryak",
"kpz": "Kupsabiny",
"kqa": "Mum",
"kqb": "Kovai",
"kqc": "Doromu-Koki",
"kqd": "Koy Sanjaq Surat",
"kqe": "Kalagan",
"kqf": "Kakabai",
"kqg": "Khe",
"kqh": "Kisankasa",
"kqi": "Koitabu",
"kqj": "Koromira",
"kqk": "Kotafon Gbe",
"kql": "Kyenele",
"kqm": "Khisa",
"kqn": "Kaonde",
"kqo": "Eastern Krahn",
"kqp": "Kimré",
"kqq": "Krenak",
"kqr": "Kimaragang",
"kqs": "Northern Kissi",
"kqt": "Klias River Kadazan",
"kqu": "Seroa",
"kqv": "Okolod",
"kqw": "Kandas",
"kqx": "Mser",
"kqy": "Koorete",
"kqz": "Korana",
"kr": "Kanuri",
"kra": "Kumhali",
"krb": "Karkin",
"krc": "Karachay-Balkar",
"krd": "Kairui-Midiki",
"kre": "Panará",
"krf": "Koro (Vanuatu)",
"krh": "Kurama",
"kri": "Krio",
"krj": "Kinaray-A",
"krk": "Kerek",
"krl": "Karelian",
"krn": "Sapo",
"kro": "Kru languages",
"krp": "Korop",
"krr": "Krung",
"krs": "Gbaya (Sudan)",
"krt": "Tumari Kanuri",
"kru": "Kurukh",
"krv": "Kavet",
"krw": "Western Krahn",
"krx": "Karon",
"kry": "Kryts",
"krz": "Sota Kanum",
"ks": "Kashmiri",
"ksa": "Shuwa-Zamani",
"ksb": "Shambala",
"ksc": "Southern Kalinga",
"ksd": "Kuanua",
"kse": "Kuni",
"ksf": "Bafia",
"ksg": "Kusaghe",
"ksh": "Kölsch",
"ksi": "Krisa; I'saka",
"ksj": "Uare",
"ksk": "Kansa",
"ksl": "Kumalu",
"ksm": "Kumba",
"ksn": "Kasiguranin",
"kso": "Kofa",
"ksp": "Kaba",
"ksq": "Kwaami",
"ksr": "Borong",
"kss": "Southern Kisi",
"kst": "Winyé",
"ksu": "Khamyang",
"ksv": "Kusu",
"ksw": "S'gaw Karen",
"ksx": "Kedang",
"ksy": "Kharia Thar",
"ksz": "Kodaku",
"kta": "Katua",
"ktb": "Kambaata",
"ktc": "Kholok",
"ktd": "Kokata; Kukatha",
"kte": "Nubri",
"ktf": "Kwami",
"ktg": "Kalkutung",
"kth": "Karanga",
"kti": "North Muyu",
"ktj": "Plapo Krumen",
"ktk": "Kaniet",
"ktl": "Koroshi",
"ktm": "Kurti",
"ktn": "Karitiâna",
"kto": "Kuot",
"ktp": "Kaduo",
"ktq": "Katabaga",
"kts": "South Muyu",
"ktt": "Ketum",
"ktu": "Kituba (Democratic Republic of Congo)",
"ktv": "Eastern Katu",
"ktw": "Kato",
"ktx": "Kaxararí",
"kty": "Kango (Bas-Uélé District)",
"ktz": "Juǀʼhoan; Juǀʼhoansi",
"ku": "Kurdish",
"kub": "Kutep",
"kuc": "Kwinsu",
"kud": "'Auhelawa",
"kue": "Kuman (Papua New Guinea)",
"kuf": "Western Katu",
"kug": "Kupa",
"kuh": "Kushi",
"kui": "Kuikúro-Kalapálo; Kalapalo",
"kuj": "Kuria",
"kuk": "Kepo'",
"kul": "Kulere",
"kum": "Kumyk",
"kun": "Kunama",
"kuo": "Kumukio",
"kup": "Kunimaipa",
"kuq": "Karipuna",
"kus": "Kusaal",
"kut": "Kutenai",
"kuu": "Upper Kuskokwim",
"kuv": "Kur",
"kuw": "Kpagua",
"kux": "Kukatja",
"kuy": "Kuuku-Ya'u",
"kuz": "Kunza",
"kv": "Komi",
"kva": "Bagvalal",
"kvb": "Kubu",
"kvc": "Kove",
"kvd": "Kui (Indonesia)",
"kve": "Kalabakan",
"kvf": "Kabalai",
"kvg": "Kuni-Boazi",
"kvh": "Komodo",
"kvi": "Kwang",
"kvj": "Psikye",
"kvk": "Korean Sign Language",
"kvl": "Kayaw",
"kvm": "Kendem",
"kvn": "Border Kuna",
"kvo": "Dobel",
"kvp": "Kompane",
"kvq": "Geba Karen",
"kvr": "Kerinci",
"kvt": "Lahta Karen; Lahta",
"kvu": "Yinbaw Karen",
"kvv": "Kola",
"kvw": "Wersing",
"kvx": "Parkari Koli",
"kvy": "Yintale Karen; Yintale",
"kvz": "Tsakwambo; Tsaukambo",
"kw": "Cornish",
"kwa": "Dâw",
"kwb": "Kwa",
"kwc": "Likwala",
"kwd": "Kwaio",
"kwe": "Kwerba",
"kwf": "Kwara'ae",
"kwg": "Sara Kaba Deme",
"kwh": "Kowiai",
"kwi": "Awa-Cuaiquer",
"kwj": "Kwanga",
"kwk": "Kwakiutl",
"kwl": "Kofyar",
"kwm": "Kwambi",
"kwn": "Kwangali",
"kwo": "Kwomtari",
"kwp": "Kodia",
"kwr": "Kwer",
"kws": "Kwese",
"kwt": "Kwesten",
"kwu": "Kwakum",
"kwv": "Sara Kaba Náà",
"kww": "Kwinti",
"kwx": "Khirwar",
"kwy": "San Salvador Kongo",
"kwz": "Kwadi",
"kxa": "Kairiru",
"kxb": "Krobu",
"kxc": "Konso; Khonso",
"kxd": "Brunei",
"kxf": "Manumanaw Karen; Manumanaw",
"kxh": "Karo (Ethiopia)",
"kxi": "Keningau Murut",
"kxj": "Kulfa",
"kxk": "Zayein Karen",
"kxm": "Northern Khmer",
"kxn": "Kanowit-Tanjong Melanau",
"kxo": "Kanoé",
"kxp": "Wadiyara Koli",
"kxq": "Smärky Kanum",
"kxr": "Koro (Papua New Guinea)",
"kxs": "Kangjia",
"kxt": "Koiwat",
"kxv": "Kuvi",
"kxw": "Konai",
"kxx": "Likuba",
"kxy": "Kayong",
"kxz": "Kerewo",
"ky": "Kirghiz; Kyrgyz",
"kya": "Kwaya",
"kyb": "Butbut Kalinga",
"kyc": "Kyaka",
"kyd": "Karey",
"kye": "Krache",
"kyf": "Kouya",
"kyg": "Keyagana",
"kyh": "Karok",
"kyi": "Kiput",
"kyj": "Karao",
"kyk": "Kamayo",
"kyl": "Kalapuya",
"kym": "Kpatili",
"kyn": "Northern Binukidnon",
"kyo": "Kelon",
"kyp": "Kang",
"kyq": "Kenga",
"kyr": "Kuruáya",
"kys": "Baram Kayan",
"kyt": "Kayagar",
"kyu": "Western Kayah",
"kyv": "Kayort",
"kyw": "Kudmali",
"kyx": "Rapoisi",
"kyy": "Kambaira",
"kyz": "Kayabí",
"kza": "Western Karaboro",
"kzb": "Kaibobo",
"kzc": "Bondoukou Kulango",
"kzd": "Kadai",
"kze": "Kosena",
"kzf": "Da'a Kaili",
"kzg": "Kikai",
"kzi": "Kelabit",
"kzk": "Kazukuru",
"kzl": "Kayeli",
"kzm": "Kais",
"kzn": "Kokola",
"kzo": "Kaningi",
"kzp": "Kaidipang",
"kzq": "Kaike",
"kzr": "Karang",
"kzs": "Sugut Dusun",
"kzu": "Kayupulau",
"kzv": "Komyandaret",
"kzw": "Karirí-Xocó",
"kzx": "Kamarian",
"kzy": "Kango (Tshopo District)",
"kzz": "Kalabra",
"la": "Latin",
"laa": "Southern Subanen",
"lab": "Linear A",
"lac": "Lacandon",
"lad": "Ladino",
"lae": "Pattani",
"laf": "Lafofa",
"lag": "Langi",
"lah": "Lahnda",
"lai": "Lambya",
"laj": "Lango (Uganda)",
"lal": "Lalia",
"lam": "Lamba",
"lan": "Laru",
"lap": "Laka (Chad)",
"laq": "Qabiao",
"lar": "Larteh",
"las": "Lama (Togo)",
"lau": "Laba",
"law": "Lauje",
"lax": "Tiwa",
"lay": "Lama Bai",
"laz": "Aribwatsa",
"lb": "Luxembourgish; Letzeburgesch",
"lbb": "Label",
"lbc": "Lakkia",
"lbe": "Lak",
"lbf": "Tinani",
"lbg": "Laopang",
"lbi": "La'bi",
"lbj": "Ladakhi",
"lbk": "Central Bontok",
"lbl": "Libon Bikol",
"lbm": "Lodhi",
"lbn": "Rmeet",
"lbo": "Laven",
"lbq": "Wampar",
"lbr": "Lohorung",
"lbs": "Libyan Sign Language",
"lbt": "Lachi",
"lbu": "Labu",
"lbv": "Lavatbura-Lamusong",
"lbw": "Tolaki",
"lbx": "Lawangan",
"lby": "Lamalama; Lamu-Lamu",
"lbz": "Lardil",
"lcc": "Legenyem",
"lcd": "Lola",
"lce": "Loncong; Sekak",
"lcf": "Lubu",
"lch": "Luchazi",
"lcl": "Lisela",
"lcm": "Tungag",
"lcp": "Western Lawa",
"lcq": "Luhu",
"lcs": "Lisabata-Nuniali",
"lda": "Kla-Dan",
"ldb": "Dũya",
"ldd": "Luri",
"ldg": "Lenyima",
"ldh": "Lamja-Dengsa-Tola",
"ldi": "Laari",
"ldj": "Lemoro",
"ldk": "Leelau",
"ldl": "Kaan",
"ldm": "Landoma",
"ldn": "Láadan",
"ldo": "Loo",
"ldp": "Tso",
"ldq": "Lufu",
"lea": "Lega-Shabunda",
"leb": "Lala-Bisa",
"lec": "Leco",
"led": "Lendu",
"lee": "Lyélé",
"lef": "Lelemi",
"leh": "Lenje",
"lei": "Lemio",
"lej": "Lengola",
"lek": "Leipon",
"lel": "Lele (Democratic Republic of Congo)",
"lem": "Nomaande",
"len": "Lenca",
"leo": "Leti (Cameroon)",
"lep": "Lepcha",
"leq": "Lembena",
"ler": "Lenkau",
"les": "Lese",
"let": "Lesing-Gelimi; Amio-Gelimi",
"leu": "Kara (Papua New Guinea)",
"lev": "Lamma",
"lew": "Ledo Kaili",
"lex": "Luang",
"ley": "Lemolang",
"lez": "Lezghian",
"lfa": "Lefa",
"lfn": "Lingua Franca Nova",
"lg": "Ganda; Luganda",
"lga": "Lungga",
"lgb": "Laghu",
"lgg": "Lugbara",
"lgh": "Laghuu",
"lgi": "Lengilu",
"lgk": "Lingarak; Neverver",
"lgl": "Wala",
"lgm": "Lega-Mwenga",
"lgn": "T'apo; Opuuo",
"lgo": "Lango (South Sudan)",
"lgq": "Logba",
"lgr": "Lengo",
"lgt": "Pahi",
"lgu": "Longgu",
"lgz": "Ligenza",
"lha": "Laha (Viet Nam)",
"lhh": "Laha (Indonesia)",
"lhi": "Lahu Shi",
"lhl": "Lahul Lohar",
"lhm": "Lhomi",
"lhn": "Lahanan",
"lhp": "Lhokpu",
"lhs": "Mlahsö",
"lht": "Lo-Toga",
"lhu": "Lahu",
"li": "Limburgan; Limburger; Limburgish",
"lia": "West-Central Limba",
"lib": "Likum",
"lic": "Hlai",
"lid": "Nyindrou",
"lie": "Likila",
"lif": "Limbu",
"lig": "Ligbi",
"lih": "Lihir",
"lij": "Ligurian",
"lik": "Lika",
"lil": "Lillooet",
"lio": "Liki",
"lip": "Sekpele",
"liq": "Libido",
"lir": "Liberian English",
"lis": "Lisu",
"liu": "Logorik",
"liv": "Liv",
"liw": "Col",
"lix": "Liabuku",
"liy": "Banda-Bambari",
"liz": "Libinza",
"lja": "Golpa",
"lje": "Rampi",
"lji": "Laiyolo",
"ljl": "Li'o",
"ljp": "Lampung Api",
"ljw": "Yirandali",
"ljx": "Yuru",
"lka": "Lakalei",
"lkb": "Kabras; Lukabaras",
"lkc": "Kucong",
"lkd": "Lakondê",
"lke": "Kenyi",
"lkh": "Lakha",
"lki": "Laki",
"lkj": "Remun",
"lkl": "Laeko-Libuat",
"lkm": "Kalaamaya",
"lkn": "Lakon; Vure",
"lko": "Khayo; Olukhayo",
"lkr": "Päri",
"lks": "Kisa; Olushisa",
"lkt": "Lakota",
"lku": "Kungkari",
"lky": "Lokoya",
"lla": "Lala-Roba",
"llb": "Lolo",
"llc": "Lele (Guinea)",
"lld": "Ladin",
"lle": "Lele (Papua New Guinea)",
"llf": "Hermit",
"llg": "Lole",
"llh": "Lamu",
"lli": "Teke-Laali",
"llj": "Ladji Ladji",
"llk": "Lelak",
"lll": "Lilau",
"llm": "Lasalimu",
"lln": "Lele (Chad)",
"llp": "North Efate",
"llq": "Lolak",
"lls": "Lithuanian Sign Language",
"llu": "Lau",
"llx": "Lauan",
"lma": "East Limba",
"lmb": "Merei",
"lmc": "Limilngan",
"lmd": "Lumun",
"lme": "Pévé",
"lmf": "South Lembata",
"lmg": "Lamogai",
"lmh": "Lambichhong",
"lmi": "Lombi",
"lmj": "West Lembata",
"lmk": "Lamkang",
"lml": "Hano",
"lmn": "Lambadi",
"lmo": "Lombard",
"lmp": "Limbum",
"lmq": "Lamatuka",
"lmr": "Lamalera",
"lmu": "Lamenu",
"lmv": "Lomaiviti",
"lmw": "Lake Miwok",
"lmx": "Laimbue",
"lmy": "Lamboya",
"ln": "Lingala",
"lna": "Langbashe",
"lnb": "Mbalanhu",
"lnd": "Lundayeh; Lun Bawang",
"lng": "Langobardic",
"lnh": "Lanoh",
"lni": "Daantanai'",
"lnj": "Leningitij",
"lnl": "South Central Banda",
"lnm": "Langam",
"lnn": "Lorediakarkar",
"lns": "Lamnso'",
"lnu": "Longuda",
"lnw": "Lanima",
"lnz": "Lonzo",
"lo": "Lao",
"loa": "Loloda",
"lob": "Lobi",
"loc": "Inonhan",
"loe": "Saluan",
"lof": "Logol",
"log": "Logo",
"loh": "Narim",
"loi": "Loma (Côte d'Ivoire)",
"loj": "Lou",
"lok": "Loko",
"lol": "Mongo",
"lom": "Loma (Liberia)",
"lon": "Malawi Lomwe",
"loo": "Lombo",
"lop": "Lopa",
"loq": "Lobala",
"lor": "Téén",
"los": "Loniu",
"lot": "Otuho",
"lou": "Louisiana Creole",
"lov": "Lopi",
"low": "Tampias Lobu",
"lox": "Loun",
"loy": "Loke",
"loz": "Lozi",
"lpa": "Lelepa",
"lpe": "Lepki",
"lpn": "Long Phuri Naga",
"lpo": "Lipo",
"lpx": "Lopit",
"lqr": "Logir",
"lra": "Rara Bakati'",
"lrc": "Northern Luri",
"lre": "Laurentian",
"lrg": "Laragia",
"lri": "Marachi; Olumarachi",
"lrk": "Loarki",
"lrl": "Lari",
"lrm": "Marama; Olumarama",
"lrn": "Lorang",
"lro": "Laro",
"lrr": "Southern Yamphu",
"lrt": "Larantuka Malay",
"lrv": "Larevat",
"lrz": "Lemerig",
"lsa": "Lasgerdi",
"lsb": "Burundian Sign Language; Langue des Signes Burundaise",
"lsc": "Albarradas Sign Language; Lengua de señas Albarradas",
"lsd": "Lishana Deni",
"lse": "Lusengo",
"lsh": "Lish",
"lsi": "Lashi",
"lsl": "Latvian Sign Language",
"lsm": "Saamia; Olusamia",
"lsn": "Tibetan Sign Language",
"lso": "Laos Sign Language",
"lsp": "Panamanian Sign Language; Lengua de Señas Panameñas",
"lsr": "Aruop",
"lss": "Lasi",
"lst": "Trinidad and Tobago Sign Language",
"lsv": "Sivia Sign Language",
"lsw": "Seychelles Sign Language; Lalang Siny Seselwa; Langue des Signes Seychelloise",
"lsy": "Mauritian Sign Language",
"lt": "Lithuanian",
"ltc": "Late Middle Chinese",
"ltg": "Latgalian",
"lth": "Thur",
"lti": "Leti (Indonesia)",
"ltn": "Latundê",
"lto": "Tsotso; Olutsotso",
"lts": "Tachoni; Lutachoni",
"ltu": "Latu",
"lu": "Luba-Katanga",
"lua": "Luba-Lulua",
"luc": "Aringa",
"lud": "Ludian",
"lue": "Luvale",
"luf": "Laua",
"lui": "Luiseno",
"luj": "Luna",
"luk": "Lunanakha",
"lul": "Olu'bo",
"lum": "Luimbi",
"lun": "Lunda",
"luo": "Luo (Kenya and Tanzania); Dholuo",
"lup": "Lumbu",
"luq": "Lucumi",
"lur": "Laura",
"lus": "Lushai",
"lut": "Lushootseed",
"luu": "Lumba-Yakkha",
"luv": "Luwati",
"luw": "Luo (Cameroon)",
"luy": "Luyia; Oluluyia",
"luz": "Southern Luri",
"lv": "Latvian",
"lva": "Maku'a",
"lvi": "Lavi",
"lvk": "Lavukaleve",
"lvs": "Standard Latvian",
"lvu": "Levuka",
"lwa": "Lwalu",
"lwe": "Lewo Eleng",
"lwg": "Wanga; Oluwanga",
"lwh": "White Lachi",
"lwl": "Eastern Lawa",
"lwm": "Laomian",
"lwo": "Luwo",
"lws": "Malawian Sign Language",
"lwt": "Lewotobi",
"lwu": "Lawu",
"lww": "Lewo",
"lxm": "Lakurumau",
"lya": "Layakha",
"lyg": "Lyngngam",
"lyn": "Luyana",
"lzh": "Literary Chinese",
"lzl": "Litzlitz",
"lzn": "Leinong Naga",
"lzz": "Laz",
"maa": "San Jerónimo Tecóatl Mazatec",
"mab": "Yutanduchi Mixtec",
"mad": "Madurese",
"mae": "Bo-Rukul",
"maf": "Mafa",
"mag": "Magahi",
"mai": "Maithili",
"maj": "Jalapa De Díaz Mazatec",
"mak": "Makasar",
"mam": "Mam",
"man": "Mandingo; Manding",
"map": "Austronesian languages",
"maq": "Chiquihuitlán Mazatec",
"mas": "Masai",
"mat": "San Francisco Matlatzinca",
"mau": "Huautla Mazatec",
"mav": "Sateré-Mawé",
"maw": "Mampruli",
"max": "North Moluccan Malay",
"maz": "Central Mazahua",
"mba": "Higaonon",
"mbb": "Western Bukidnon Manobo",
"mbc": "Macushi",
"mbd": "Dibabawon Manobo",
"mbe": "Molale",
"mbf": "Baba Malay",
"mbh": "Mangseng",
"mbi": "Ilianen Manobo",
"mbj": "Nadëb",
"mbk": "Malol",
"mbl": "Maxakalí",
"mbm": "Ombamba",
"mbn": "Macaguán",
"mbo": "Mbo (Cameroon)",
"mbp": "Malayo",
"mbq": "Maisin",
"mbr": "Nukak Makú",
"mbs": "Sarangani Manobo",
"mbt": "Matigsalug Manobo",
"mbu": "Mbula-Bwazza",
"mbv": "Mbulungish",
"mbw": "Maring",
"mbx": "Mari (East Sepik Province)",
"mby": "Memoni",
"mbz": "Amoltepec Mixtec",
"mca": "Maca",
"mcb": "Machiguenga",
"mcc": "Bitur",
"mcd": "Sharanahua",
"mce": "Itundujia Mixtec",
"mcf": "Matsés",
"mcg": "Mapoyo",
"mch": "Maquiritari",
"mci": "Mese",
"mcj": "Mvanip",
"mck": "Mbunda",
"mcl": "Macaguaje",
"mcm": "Malaccan Creole Portuguese",
"mcn": "Masana",
"mco": "Coatlán Mixe",
"mcp": "Makaa",
"mcq": "Ese",
"mcr": "Menya",
"mcs": "Mambai",
"mct": "Mengisa",
"mcu": "Cameroon Mambila",
"mcv": "Minanibai",
"mcw": "Mawa (Chad)",
"mcx": "Mpiemo",
"mcy": "South Watut",
"mcz": "Mawan",
"mda": "Mada (Nigeria)",
"mdb": "Morigi",
"mdc": "Male (Papua New Guinea)",
"mdd": "Mbum",
"mde": "Maba (Chad)",
"mdf": "Moksha",
"mdg": "Massalat",
"mdh": "Maguindanaon",
"mdi": "Mamvu",
"mdj": "Mangbetu",
"mdk": "Mangbutu",
"mdl": "Maltese Sign Language",
"mdm": "Mayogo",
"mdn": "Mbati",
"mdp": "Mbala",
"mdq": "Mbole",
"mdr": "Mandar",
"mds": "Maria (Papua New Guinea)",
"mdt": "Mbere",
"mdu": "Mboko",
"mdv": "Santa Lucía Monteverde Mixtec",
"mdw": "Mbosi",
"mdx": "Dizin",
"mdy": "Male (Ethiopia)",
"mdz": "Suruí Do Pará",
"mea": "Menka",
"meb": "Ikobi",
"mec": "Marra",
"med": "Melpa",
"mee": "Mengen",
"mef": "Megam",
"meh": "Southwestern Tlaxiaco Mixtec",
"mei": "Midob",
"mej": "Meyah",
"mek": "Mekeo",
"mel": "Central Melanau",
"mem": "Mangala",
"men": "Mende (Sierra Leone)",
"meo": "Kedah Malay",
"mep": "Miriwoong",
"meq": "Merey",
"mer": "Meru",
"mes": "Masmaje",
"met": "Mato",
"meu": "Motu",
"mev": "Mano",
"mew": "Maaka",
"mey": "Hassaniyya",
"mez": "Menominee",
"mfa": "Pattani Malay",
"mfb": "Bangka",
"mfc": "Mba",
"mfd": "Mendankwe-Nkwen",
"mfe": "Morisyen",
"mff": "Naki",
"mfg": "Mogofin",
"mfh": "Matal",
"mfi": "Wandala",
"mfj": "Mefele",
"mfk": "North Mofu",
"mfl": "Putai",
"mfm": "Marghi South",
"mfn": "Cross River Mbembe",
"mfo": "Mbe",
"mfp": "Makassar Malay",
"mfq": "Moba",
"mfr": "Marrithiyel",
"mfs": "Mexican Sign Language",
"mft": "Mokerang",
"mfu": "Mbwela",
"mfv": "Mandjak",
"mfw": "Mulaha",
"mfx": "Melo",
"mfy": "Mayo",
"mfz": "Mabaan",
"mg": "Malagasy",
"mga": "Middle Irish (900-1200)",
"mgb": "Mararit",
"mgc": "Morokodo",
"mgd": "Moru",
"mge": "Mango",
"mgf": "Maklew",
"mgg": "Mpumpong",
"mgh": "Makhuwa-Meetto",
"mgi": "Lijili",
"mgj": "Abureni",
"mgk": "Mawes",
"mgl": "Maleu-Kilenge",
"mgm": "Mambae",
"mgn": "Mbangi",
"mgo": "Meta'",
"mgp": "Eastern Magar",
"mgq": "Malila",
"mgr": "Mambwe-Lungu",
"mgs": "Manda (Tanzania)",
"mgt": "Mongol",
"mgu": "Mailu",
"mgv": "Matengo",
"mgw": "Matumbi",
"mgy": "Mbunga",
"mgz": "Mbugwe",
"mh": "Marshallese",
"mha": "Manda (India)",
"mhb": "Mahongwe",
"mhc": "Mocho",
"mhd": "Mbugu",
"mhe": "Besisi; Mah Meri",
"mhf": "Mamaa",
"mhg": "Margu",
"mhi": "Ma'di",
"mhj": "Mogholi",
"mhk": "Mungaka",
"mhl": "Mauwake",
"mhm": "Makhuwa-Moniga",
"mhn": "Mócheno",
"mho": "Mashi (Zambia)",
"mhp": "Balinese Malay",
"mhq": "Mandan",
"mhr": "Eastern Mari",
"mhs": "Buru (Indonesia)",
"mht": "Mandahuaca",
"mhu": "Digaro-Mishmi; Darang Deng",
"mhw": "Mbukushu",
"mhx": "Maru; Lhaovo",
"mhy": "Ma'anyan",
"mhz": "Mor (Mor Islands)",
"mi": "Maori",
"mia": "Miami",
"mib": "Atatláhuca Mixtec",
"mic": "Mi'kmaq; Micmac",
"mid": "Mandaic",
"mie": "Ocotepec Mixtec",
"mif": "Mofu-Gudur",
"mig": "San Miguel El Grande Mixtec",
"mih": "Chayuco Mixtec",
"mii": "Chigmecatitlán Mixtec",
"mij": "Abar; Mungbam",
"mik": "Mikasuki",
"mil": "Peñoles Mixtec",
"mim": "Alacatlatzala Mixtec",
"min": "Minangkabau",
"mio": "Pinotepa Nacional Mixtec",
"mip": "Apasco-Apoala Mixtec",
"miq": "Mískito",
"mir": "Isthmus Mixe",
"mit": "Southern Puebla Mixtec",
"miu": "Cacaloxtepec Mixtec",
"miw": "Akoye",
"mix": "Mixtepec Mixtec",
"miy": "Ayutla Mixtec",
"miz": "Coatzospan Mixtec",
"mjb": "Makalero",
"mjc": "San Juan Colorado Mixtec",
"mjd": "Northwest Maidu",
"mje": "Muskum",
"mjg": "Tu",
"mjh": "Mwera (Nyasa)",
"mji": "Kim Mun",
"mjj": "Mawak",
"mjk": "Matukar",
"mjl": "Mandeali",
"mjm": "Medebur",
"mjn": "Ma (Papua New Guinea)",
"mjo": "Malankuravan",
"mjp": "Malapandaram",
"mjq": "Malaryan",
"mjr": "Malavedan",
"mjs": "Miship",
"mjt": "Sauria Paharia",
"mju": "Manna-Dora",
"mjv": "Mannan",
"mjw": "Karbi",
"mjx": "Mahali",
"mjy": "Mahican",
"mjz": "Majhi",
"mk": "Macedonian",
"mka": "Mbre",
"mkb": "Mal Paharia",
"mkc": "Siliput",
"mke": "Mawchi",
"mkf": "Miya",
"mkg": "Mak (China)",
"mkh": "Mon-Khmer languages",
"mki": "Dhatki",
"mkj": "Mokilese",
"mkk": "Byep",
"mkl": "Mokole",
"mkm": "Moklen",
"mkn": "Kupang Malay",
"mko": "Mingang Doso",
"mkp": "Moikodi",
"mkq": "Bay Miwok",
"mkr": "Malas",
"mks": "Silacayoapan Mixtec",
"mkt": "Vamale",
"mku": "Konyanka Maninka",
"mkv": "Mafea",
"mkw": "Kituba (Congo)",
"mkx": "Kinamiging Manobo",
"mky": "East Makian",
"mkz": "Makasae",
"ml": "Malayalam",
"mla": "Malo",
"mlb": "Mbule",
"mlc": "Cao Lan",
"mle": "Manambu",
"mlf": "Mal",
"mlh": "Mape",
"mli": "Malimpung",
"mlj": "Miltu",
"mlk": "Ilwana; Kiwilwana",
"mll": "Malua Bay",
"mlm": "Mulam",
"mln": "Malango",
"mlo": "Mlomp",
"mlp": "Bargam",
"mlq": "Western Maninkakan",
"mlr": "Vame",
"mls": "Masalit",
"mlu": "To'abaita",
"mlv": "Motlav; Mwotlap",
"mlw": "Moloko",
"mlx": "Malfaxal; Naha'ai",
"mlz": "Malaynon",
"mma": "Mama",
"mmb": "Momina",
"mmc": "Michoacán Mazahua",
"mmd": "Maonan",
"mme": "Mae",
"mmf": "Mundat",
"mmg": "North Ambrym",
"mmh": "Mehináku",
"mmi": "Musar",
"mmj": "Majhwar",
"mmk": "Mukha-Dora",
"mml": "Man Met",
"mmm": "Maii",
"mmn": "Mamanwa",
"mmo": "Mangga Buang",
"mmp": "Siawi",
"mmq": "Musak",
"mmr": "Western Xiangxi Miao",
"mmt": "Malalamai",
"mmu": "Mmaala",
"mmv": "Miriti",
"mmw": "Emae",
"mmx": "Madak",
"mmy": "Migaama",
"mmz": "Mabaale",
"mn": "Mongolian",
"mna": "Mbula",
"mnb": "Muna",
"mnc": "Manchu",
"mnd": "Mondé",
"mne": "Naba",
"mnf": "Mundani",
"mng": "Eastern Mnong",
"mnh": "Mono (Democratic Republic of Congo)",
"mni": "Manipuri",
"mnj": "Munji",
"mnk": "Mandinka",
"mnl": "Tiale",
"mnm": "Mapena",
"mnn": "Southern Mnong",
"mno": "Manobo languages",
"mnp": "Min Bei Chinese",
"mnq": "Minriq",
"mnr": "Mono (USA)",
"mns": "Mansi",
"mnu": "Mer",
"mnv": "Rennell-Bellona",
"mnw": "Mon",
"mnx": "Manikion",
"mny": "Manyawa",
"mnz": "Moni",
"moa": "Mwan",
"moc": "Mocoví",
"mod": "Mobilian",
"moe": "Innu; Montagnais",
"mog": "Mongondow",
"moh": "Mohawk",
"moi": "Mboi",
"moj": "Monzombo",
"mok": "Morori",
"mom": "Mangue",
"moo": "Monom",
"mop": "Mopán Maya",
"moq": "Mor (Bomberai Peninsula)",
"mor": "Moro",
"mos": "Mossi",
"mot": "Barí",
"mou": "Mogum",
"mov": "Mohave",
"mow": "Moi (Congo)",
"mox": "Molima",
"moy": "Shekkacho",
"moz": "Mukulu; Gergiko",
"mpa": "Mpoto",
"mpb": "Malak Malak; Mullukmulluk",
"mpc": "Mangarrayi",
"mpd": "Machinere",
"mpe": "Majang",
"mpg": "Marba",
"mph": "Maung",
"mpi": "Mpade",
"mpj": "Martu Wangka; Wangkajunga",
"mpk": "Mbara (Chad)",
"mpl": "Middle Watut",
"mpm": "Yosondúa Mixtec",
"mpn": "Mindiri",
"mpo": "Miu",
"mpp": "Migabac",
"mpq": "Matís",
"mpr": "Vangunu",
"mps": "Dadibi",
"mpt": "Mian",
"mpu": "Makuráp",
"mpv": "Mungkip",
"mpw": "Mapidian",
"mpx": "Misima-Panaeati",
"mpy": "Mapia",
"mpz": "Mpi",
"mqa": "Maba (Indonesia)",
"mqb": "Mbuko",
"mqc": "Mangole",
"mqe": "Matepi",
"mqf": "Momuna",
"mqg": "Kota Bangun Kutai Malay",
"mqh": "Tlazoyaltepec Mixtec",
"mqi": "Mariri",
"mqj": "Mamasa",
"mqk": "Rajah Kabunsuwan Manobo",
"mql": "Mbelime",
"mqm": "South Marquesan",
"mqn": "Moronene",
"mqo": "Modole",
"mqp": "Manipa",
"mqq": "Minokok",
"mqr": "Mander",
"mqs": "West Makian",
"mqt": "Mok",
"mqu": "Mandari",
"mqv": "Mosimo",
"mqw": "Murupi",
"mqx": "Mamuju",
"mqy": "Manggarai",
"mqz": "Pano",
"mr": "Marathi",
"mra": "Mlabri",
"mrb": "Marino",
"mrc": "Maricopa",
"mrd": "Western Magar",
"mre": "Martha's Vineyard Sign Language",
"mrf": "Elseng",
"mrg": "Mising",
"mrh": "Mara Chin",
"mrj": "Western Mari",
"mrk": "Hmwaveke",
"mrl": "Mortlockese",
"mrm": "Merlav; Mwerlap",
"mrn": "Cheke Holo",
"mro": "Mru",
"mrp": "Morouas",
"mrq": "North Marquesan",
"mrr": "Maria (India)",
"mrs": "Maragus",
"mrt": "Marghi Central",
"mru": "Mono (Cameroon)",
"mrv": "Mangareva",
"mrw": "Maranao",
"mrx": "Maremgi; Dineor",
"mry": "Mandaya",
"mrz": "Marind",
"ms": "Malay (macrolanguage)",
"msb": "Masbatenyo",
"msc": "Sankaran Maninka",
"msd": "Yucatec Maya Sign Language",
"mse": "Musey",
"msf": "Mekwei",
"msg": "Moraid",
"msh": "Masikoro Malagasy",
"msi": "Sabah Malay",
"msj": "Ma (Democratic Republic of Congo)",
"msk": "Mansaka",
"msl": "Molof; Poule",
"msm": "Agusan Manobo",
"msn": "Vurës",
"mso": "Mombum",
"msp": "Maritsauá",
"msq": "Caac",
"msr": "Mongolian Sign Language",
"mss": "West Masela",
"msu": "Musom",
"msv": "Maslam",
"msw": "Mansoanka",
"msx": "Moresada",
"msy": "Aruamu",
"msz": "Momare",
"mt": "Maltese",
"mta": "Cotabato Manobo",
"mtb": "Anyin Morofo",
"mtc": "Munit",
"mtd": "Mualang",
"mte": "Mono (Solomon Islands)",
"mtf": "Murik (Papua New Guinea)",
"mtg": "Una",
"mth": "Munggui",
"mti": "Maiwa (Papua New Guinea)",
"mtj": "Moskona",
"mtk": "Mbe'",
"mtl": "Montol",
"mtm": "Mator",
"mtn": "Matagalpa",
"mto": "Totontepec Mixe",
"mtp": "Wichí Lhamtés Nocten",
"mtq": "Muong",
"mtr": "Mewari",
"mts": "Yora",
"mtt": "Mota",
"mtu": "Tututepec Mixtec",
"mtv": "Asaro'o",
"mtw": "Southern Binukidnon",
"mtx": "Tidaá Mixtec",
"mty": "Nabi",
"mua": "Mundang",
"mub": "Mubi",
"muc": "Ajumbu",
"mud": "Mednyj Aleut",
"mue": "Media Lengua",
"mug": "Musgu",
"muh": "Mündü",
"mui": "Musi",
"muj": "Mabire",
"muk": "Mugom",
"mum": "Maiwala",
"mun": "Munda languages",
"muo": "Nyong",
"mup": "Malvi",
"muq": "Eastern Xiangxi Miao",
"mur": "Murle",
"mus": "Creek",
"mut": "Western Muria",
"muu": "Yaaku",
"muv": "Muthuvan",
"mux": "Bo-Ung",
"muy": "Muyang",
"muz": "Mursi",
"mva": "Manam",
"mvb": "Mattole",
"mvd": "Mamboru",
"mve": "Marwari (Pakistan)",
"mvf": "Peripheral Mongolian",
"mvg": "Yucuañe Mixtec",
"mvh": "Mulgi",
"mvi": "Miyako",
"mvk": "Mekmek",
"mvl": "Mbara (Australia)",
"mvn": "Minaveha",
"mvo": "Marovo",
"mvp": "Duri",
"mvq": "Moere",
"mvr": "Marau",
"mvs": "Massep",
"mvt": "Mpotovoro",
"mvu": "Marfa",
"mvv": "Tagal Murut",
"mvw": "Machinga",
"mvx": "Meoswar",
"mvy": "Indus Kohistani",
"mvz": "Mesqan",
"mwa": "Mwatebu",
"mwb": "Juwal",
"mwc": "Are",
"mwe": "Mwera (Chimwera)",
"mwf": "Murrinh-Patha",
"mwg": "Aiklep",
"mwh": "Mouk-Aria",
"mwi": "Labo; Ninde",
"mwk": "Kita Maninkakan",
"mwl": "Mirandese",
"mwm": "Sar",
"mwn": "Nyamwanga",
"mwo": "Central Maewo",
"mwp": "Kala Lagaw Ya",
"mwq": "Mün Chin",
"mwr": "Marwari",
"mws": "Mwimbi-Muthambi",
"mwt": "Moken",
"mwu": "Mittu",
"mwv": "Mentawai",
"mww": "Hmong Daw",
"mwz": "Moingi",
"mxa": "Northwest Oaxaca Mixtec",
"mxb": "Tezoatlán Mixtec",
"mxc": "Manyika",
"mxd": "Modang",
"mxe": "Mele-Fila",
"mxf": "Malgbe",
"mxg": "Mbangala",
"mxh": "Mvuba",
"mxi": "Mozarabic",
"mxj": "Miju-Mishmi; Geman Deng",
"mxk": "Monumbo",
"mxl": "Maxi Gbe",
"mxm": "Meramera",
"mxn": "Moi (Indonesia)",
"mxo": "Mbowe",
"mxp": "Tlahuitoltepec Mixe",
"mxq": "Juquila Mixe",
"mxr": "Murik (Malaysia)",
"mxs": "Huitepec Mixtec",
"mxt": "Jamiltepec Mixtec",
"mxu": "Mada (Cameroon)",
"mxv": "Metlatónoc Mixtec",
"mxw": "Namo",
"mxx": "Mahou; Mawukakan",
"mxy": "Southeastern Nochixtlán Mixtec",
"mxz": "Central Masela",
"my": "Burmese",
"myb": "Mbay",
"myc": "Mayeka",
"mye": "Myene",
"myf": "Bambassi",
"myg": "Manta",
"myh": "Makah",
"myj": "Mangayat",
"myk": "Mamara Senoufo",
"myl": "Moma",
"mym": "Me'en",
"myn": "Mayan languages",
"myo": "Anfillo",
"myp": "Pirahã",
"myr": "Muniche",
"mys": "Mesmes",
"myu": "Mundurukú",
"myv": "Erzya",
"myw": "Muyuw",
"myx": "Masaaba",
"myy": "Macuna",
"myz": "Classical Mandaic",
"mza": "Santa María Zacatepec Mixtec",
"mzb": "Tumzabt",
"mzc": "Madagascar Sign Language",
"mzd": "Malimba",
"mze": "Morawa",
"mzg": "Monastic Sign Language",
"mzh": "Wichí Lhamtés Güisnay",
"mzi": "Ixcatlán Mazatec",
"mzj": "Manya",
"mzk": "Nigeria Mambila",
"mzl": "Mazatlán Mixe",
"mzm": "Mumuye",
"mzn": "Mazanderani",
"mzo": "Matipuhy",
"mzp": "Movima",
"mzq": "Mori Atas",
"mzr": "Marúbo",
"mzs": "Macanese",
"mzt": "Mintil",
"mzu": "Inapang",
"mzv": "Manza",
"mzw": "Deg",
"mzx": "Mawayana",
"mzy": "Mozambican Sign Language",
"mzz": "Maiadomu",
"na": "Nauru",
"naa": "Namla",
"nab": "Southern Nambikuára",
"nac": "Narak",
"nae": "Naka'ela",
"naf": "Nabak",
"nag": "Naga Pidgin",
"nah": "Nahuatl languages",
"nai": "North American Indian languages",
"naj": "Nalu",
"nak": "Nakanai",
"nal": "Nalik",
"nam": "Ngan'gityemerri",
"nan": "Min Nan Chinese",
"nao": "Naaba",
"nap": "Neapolitan",
"naq": "Khoekhoe; Nama (Namibia)",
"nar": "Iguta",
"nas": "Naasioi",
"nat": "Ca̱hungwa̱rya̱; Hungworo",
"naw": "Nawuri",
"nax": "Nakwi",
"nay": "Ngarrindjeri",
"naz": "Coatepec Nahuatl",
"nb": "Norwegian Bokmål",
"nba": "Nyemba",
"nbb": "Ndoe",
"nbc": "Chang Naga",
"nbd": "Ngbinda",
"nbe": "Konyak Naga",
"nbg": "Nagarchal",
"nbh": "Ngamo",
"nbi": "Mao Naga",
"nbj": "Ngarinyman",
"nbk": "Nake",
"nbm": "Ngbaka Ma'bo",
"nbn": "Kuri",
"nbo": "Nkukoli",
"nbp": "Nnam",
"nbq": "Nggem",
"nbr": "Numana",
"nbs": "Namibian Sign Language",
"nbt": "Na",
"nbu": "Rongmei Naga",
"nbv": "Ngamambo",
"nbw": "Southern Ngbandi",
"nby": "Ningera",
"nca": "Iyo",
"ncb": "Central Nicobarese",
"ncc": "Ponam",
"ncd": "Nachering",
"nce": "Yale",
"ncf": "Notsi",
"ncg": "Nisga'a",
"nch": "Central Huasteca Nahuatl",
"nci": "Classical Nahuatl",
"ncj": "Northern Puebla Nahuatl",
"nck": "Na-kara",
"ncl": "Michoacán Nahuatl",
"ncm": "Nambo",
"ncn": "Nauna",
"nco": "Sibe",
"ncq": "Northern Katang",
"ncr": "Ncane",
"ncs": "Nicaraguan Sign Language",
"nct": "Chothe Naga",
"ncu": "Chumburung",
"ncx": "Central Puebla Nahuatl",
"ncz": "Natchez",
"nd": "North Ndebele",
"nda": "Ndasa",
"ndb": "Kenswei Nsei",
"ndc": "Ndau",
"ndd": "Nde-Nsele-Nta",
"ndf": "Nadruvian",
"ndg": "Ndengereko",
"ndh": "Ndali",
"ndi": "Samba Leko",
"ndj": "Ndamba",
"ndk": "Ndaka",
"ndl": "Ndolo",
"ndm": "Ndam",
"ndn": "Ngundi",
"ndp": "Ndo",
"ndq": "Ndombe",
"ndr": "Ndoola",
"nds": "Low German; Low Saxon",
"ndt": "Ndunga",
"ndu": "Dugun",
"ndv": "Ndut",
"ndw": "Ndobo",
"ndx": "Nduga",
"ndy": "Lutos",
"ndz": "Ndogo",
"ne": "Nepali (macrolanguage)",
"nea": "Eastern Ngad'a",
"neb": "Toura (Côte d'Ivoire)",
"nec": "Nedebang",
"ned": "Nde-Gbite",
"nee": "Nêlêmwa-Nixumwak",
"nef": "Nefamese",
"neg": "Negidal",
"neh": "Nyenkha",
"nei": "Neo-Hittite",
"nej": "Neko",
"nek": "Neku",
"nem": "Nemi",
"nen": "Nengone",
"neo": "Ná-Meo",
"neq": "North Central Mixe",
"ner": "Yahadian",
"nes": "Bhoti Kinnauri",
"net": "Nete",
"neu": "Neo",
"nev": "Nyaheun",
"new": "Newari; Nepal Bhasa",
"nex": "Neme",
"ney": "Neyo",
"nez": "Nez Perce",
"nfa": "Dhao",
"nfd": "Ahwai",
"nfl": "Ayiwo; Äiwoo",
"nfr": "Nafaanra",
"nfu": "Mfumte",
"ng": "Ndonga",
"nga": "Ngbaka",
"ngb": "Northern Ngbandi",
"ngc": "Ngombe (Democratic Republic of Congo)",
"ngd": "Ngando (Central African Republic)",
"nge": "Ngemba",
"ngf": "Trans-New Guinea languages",
"ngg": "Ngbaka Manza",
"ngh": "Nǁng",
"ngi": "Ngizim",
"ngj": "Ngie",
"ngk": "Dalabon",
"ngl": "Lomwe",
"ngm": "Ngatik Men's Creole",
"ngn": "Ngwo",
"ngp": "Ngulu",
"ngq": "Ngurimi; Ngoreme",
"ngr": "Engdewu",
"ngs": "Gvoko",
"ngt": "Kriang; Ngeq",
"ngu": "Guerrero Nahuatl",
"ngv": "Nagumi",
"ngw": "Ngwaba",
"ngx": "Nggwahyi",
"ngy": "Tibea",
"ngz": "Ngungwel",
"nha": "Nhanda",
"nhb": "Beng",
"nhc": "Tabasco Nahuatl",
"nhd": "Chiripá; Ava Guaraní",
"nhe": "Eastern Huasteca Nahuatl",
"nhf": "Nhuwala",
"nhg": "Tetelcingo Nahuatl",
"nhh": "Nahari",
"nhi": "Zacatlán-Ahuacatlán-Tepetzintla Nahuatl",
"nhk": "Isthmus-Cosoleacaque Nahuatl",
"nhm": "Morelos Nahuatl",
"nhn": "Central Nahuatl",
"nho": "Takuu",
"nhp": "Isthmus-Pajapan Nahuatl",
"nhq": "Huaxcaleca Nahuatl",
"nhr": "Naro",
"nht": "Ometepec Nahuatl",
"nhu": "Noone",
"nhv": "Temascaltepec Nahuatl",
"nhw": "Western Huasteca Nahuatl",
"nhx": "Isthmus-Mecayapan Nahuatl",
"nhy": "Northern Oaxaca Nahuatl",
"nhz": "Santa María La Alta Nahuatl",
"nia": "Nias",
"nib": "Nakame",
"nic": "Niger-Kordofanian languages",
"nid": "Ngandi",
"nie": "Niellim",
"nif": "Nek",
"nig": "Ngalakgan",
"nih": "Nyiha (Tanzania)",
"nii": "Nii",
"nij": "Ngaju",
"nik": "Southern Nicobarese",
"nil": "Nila",
"nim": "Nilamba",
"nin": "Ninzo",
"nio": "Nganasan",
"niq": "Nandi",
"nir": "Nimboran",
"nis": "Nimi",
"nit": "Southeastern Kolami",
"niu": "Niuean",
"niv": "Gilyak",
"niw": "Nimo",
"nix": "Hema",
"niy": "Ngiti",
"niz": "Ningil",
"nja": "Nzanyi",
"njb": "Nocte Naga",
"njd": "Ndonde Hamba",
"njh": "Lotha Naga",
"nji": "Gudanji",
"njj": "Njen",
"njl": "Njalgulgule",
"njm": "Angami Naga",
"njn": "Liangmai Naga",
"njo": "Ao Naga",
"njr": "Njerep",
"njs": "Nisa",
"njt": "Ndyuka-Trio Pidgin",
"nju": "Ngadjunmaya",
"njx": "Kunyi",
"njy": "Njyem",
"njz": "Nyishi",
"nka": "Nkoya",
"nkb": "Khoibu Naga",
"nkc": "Nkongho",
"nkd": "Koireng",
"nke": "Duke",
"nkf": "Inpui Naga",
"nkg": "Nekgini",
"nkh": "Khezha Naga",
"nki": "Thangal Naga",
"nkj": "Nakai",
"nkk": "Nokuku",
"nkm": "Namat",
"nkn": "Nkangala",
"nko": "Nkonya",
"nkp": "Niuatoputapu",
"nkq": "Nkami",
"nkr": "Nukuoro",
"nks": "North Asmat",
"nkt": "Nyika (Tanzania)",
"nku": "Bouna Kulango",
"nkv": "Nyika (Malawi and Zambia)",
"nkw": "Nkutu",
"nkx": "Nkoroo",
"nkz": "Nkari",
"nl": "Dutch; Flemish",
"nla": "Ngombale",
"nlc": "Nalca",
"nle": "East Nyala",
"nlg": "Gela",
"nli": "Grangali",
"nlj": "Nyali",
"nlk": "Ninia Yali",
"nll": "Nihali",
"nlm": "Mankiyali",
"nlo": "Ngul",
"nlq": "Lao Naga",
"nlu": "Nchumbulu",
"nlv": "Orizaba Nahuatl",
"nlw": "Walangama",
"nlx": "Nahali",
"nly": "Nyamal",
"nlz": "Nalögo",
"nma": "Maram Naga",
"nmb": "Big Nambas; V'ënen Taut",
"nmc": "Ngam",
"nmd": "Ndumu",
"nme": "Mzieme Naga",
"nmf": "Tangkhul Naga (India)",
"nmg": "Kwasio",
"nmh": "Monsang Naga",
"nmi": "Nyam",
"nmj": "Ngombe (Central African Republic)",
"nmk": "Namakura",
"nml": "Ndemli",
"nmm": "Manangba",
"nmn": "ǃXóõ",
"nmo": "Moyon Naga",
"nmp": "Nimanbur",
"nmq": "Nambya",
"nmr": "Nimbari",
"nms": "Letemboi",
"nmt": "Namonuito",
"nmu": "Northeast Maidu",
"nmv": "Ngamini",
"nmw": "Nimoa; Rifao",
"nmx": "Nama (Papua New Guinea)",
"nmy": "Namuyi",
"nmz": "Nawdm",
"nn": "Norwegian Nynorsk",
"nna": "Nyangumarta",
"nnb": "Nande",
"nnc": "Nancere",
"nnd": "West Ambae",
"nne": "Ngandyera",
"nnf": "Ngaing",
"nng": "Maring Naga",
"nnh": "Ngiemboon",
"nni": "North Nuaulu",
"nnj": "Nyangatom",
"nnk": "Nankina",
"nnl": "Northern Rengma Naga",
"nnm": "Namia",
"nnn": "Ngete",
"nnp": "Wancho Naga",
"nnq": "Ngindo",
"nnr": "Narungga",
"nnt": "Nanticoke",
"nnu": "Dwang",
"nnv": "Nugunu (Australia)",
"nnw": "Southern Nuni",
"nny": "Nyangga",
"nnz": "Nda'nda'",
"no": "Norwegian",
"noa": "Woun Meu",
"noc": "Nuk",
"nod": "Northern Thai",
"noe": "Nimadi",
"nof": "Nomane",
"nog": "Nogai",
"noh": "Nomu",
"noi": "Noiri",
"noj": "Nonuya",
"nok": "Nooksack",
"nol": "Nomlaki",
"nom": "Nocamán",
"non": "Old Norse",
"nop": "Numanggang",
"noq": "Ngongo",
"nos": "Eastern Nisu",
"not": "Nomatsiguenga",
"nou": "Ewage-Notu",
"nov": "Novial",
"now": "Nyambo",
"noy": "Noy",
"noz": "Nayi",
"npa": "Nar Phu",
"npb": "Nupbikha",
"npg": "Ponyo-Gongwang Naga",
"nph": "Phom Naga",
"npi": "Nepali (individual language)",
"npl": "Southeastern Puebla Nahuatl",
"npn": "Mondropolon",
"npo": "Pochuri Naga",
"nps": "Nipsan",
"npu": "Puimei Naga",
"npx": "Noipx",
"npy": "Napu",
"nqg": "Southern Nago",
"nqk": "Kura Ede Nago",
"nql": "Ngendelengo",
"nqm": "Ndom",
"nqn": "Nen",
"nqo": "N'Ko; N’Ko",
"nqq": "Kyan-Karyaw Naga",
"nqt": "Nteng",
"nqy": "Akyaung Ari Naga",
"nr": "South Ndebele",
"nra": "Ngom",
"nrb": "Nara",
"nrc": "Noric",
"nre": "Southern Rengma Naga",
"nrf": "Jèrriais; Guernésiais",
"nrg": "Narango",
"nri": "Chokri Naga",
"nrk": "Ngarla",
"nrl": "Ngarluma",
"nrm": "Narom",
"nrn": "Norn",
"nrp": "North Picene",
"nrr": "Norra; Nora",
"nrt": "Northern Kalapuya",
"nru": "Narua",
"nrx": "Ngurmbur",
"nrz": "Lala",
"nsa": "Sangtam Naga",
"nsb": "Lower Nossob",
"nsc": "Nshi",
"nsd": "Southern Nisu",
"nse": "Nsenga",
"nsf": "Northwestern Nisu",
"nsg": "Ngasa",
"nsh": "Ngoshie",
"nsi": "Nigerian Sign Language",
"nsk": "Naskapi",
"nsl": "Norwegian Sign Language",
"nsm": "Sumi Naga",
"nsn": "Nehan",
"nso": "Pedi; Northern Sotho; Sepedi",
"nsp": "Nepalese Sign Language",
"nsq": "Northern Sierra Miwok",
"nsr": "Maritime Sign Language",
"nss": "Nali",
"nst": "Tase Naga",
"nsu": "Sierra Negra Nahuatl",
"nsv": "Southwestern Nisu",
"nsw": "Navut",
"nsx": "Nsongo",
"nsy": "Nasal",
"nsz": "Nisenan",
"ntd": "Northern Tidung",
"nte": "Nathembo",
"ntg": "Ngantangarra",
"nti": "Natioro",
"ntj": "Ngaanyatjarra",
"ntk": "Ikoma-Nata-Isenye",
"ntm": "Nateni",
"nto": "Ntomba",
"ntp": "Northern Tepehuan",
"ntr": "Delo",
"ntu": "Natügu",
"ntw": "Nottoway",
"ntx": "Tangkhul Naga (Myanmar)",
"nty": "Mantsi",
"ntz": "Natanzi",
"nua": "Yuanga",
"nub": "Nubian languages",
"nuc": "Nukuini",
"nud": "Ngala",
"nue": "Ngundu",
"nuf": "Nusu",
"nug": "Nungali",
"nuh": "Ndunda",
"nui": "Ngumbi",
"nuj": "Nyole",
"nuk": "Nuu-chah-nulth; Nuuchahnulth",
"nul": "Nusa Laut",
"num": "Niuafo'ou",
"nun": "Anong",
"nuo": "Nguôn",
"nup": "Nupe-Nupe-Tako",
"nuq": "Nukumanu",
"nur": "Nukuria",
"nus": "Nuer",
"nut": "Nung (Viet Nam)",
"nuu": "Ngbundu",
"nuv": "Northern Nuni",
"nuw": "Nguluwan",
"nux": "Mehek",
"nuy": "Nunggubuyu",
"nuz": "Tlamacazapa Nahuatl",
"nv": "Navajo; Navaho",
"nvh": "Nasarian",
"nvm": "Namiae",
"nvo": "Nyokon",
"nwa": "Nawathinehena",
"nwb": "Nyabwa",
"nwc": "Classical Newari; Classical Nepal Bhasa; Old Newari",
"nwe": "Ngwe",
"nwg": "Ngayawung",
"nwi": "Southwest Tanna",
"nwm": "Nyamusa-Molo",
"nwo": "Nauo",
"nwr": "Nawaru",
"nww": "Ndwewe",
"nwx": "Middle Newar",
"nwy": "Nottoway-Meherrin",
"nxa": "Nauete",
"nxd": "Ngando (Democratic Republic of Congo)",
"nxe": "Nage",
"nxg": "Ngad'a",
"nxi": "Nindi",
"nxk": "Koki Naga",
"nxl": "South Nuaulu",
"nxm": "Numidian",
"nxn": "Ngawun",
"nxo": "Ndambomo",
"nxq": "Naxi",
"nxr": "Ninggerum",
"nxx": "Nafri",
"ny": "Nyanja; Chewa; Chichewa",
"nyb": "Nyangbo",
"nyc": "Nyanga-li",
"nyd": "Nyore; Olunyole",
"nye": "Nyengo",
"nyf": "Giryama; Kigiryama",
"nyg": "Nyindu",
"nyh": "Nyikina",
"nyi": "Ama (Sudan)",
"nyj": "Nyanga",
"nyk": "Nyaneka",
"nyl": "Nyeu",
"nym": "Nyamwezi",
"nyn": "Nyankole",
"nyo": "Nyoro",
"nyp": "Nyang'i",
"nyq": "Nayini",
"nyr": "Nyiha (Malawi)",
"nys": "Nyungar",
"nyt": "Nyawaygi",
"nyu": "Nyungwe",
"nyv": "Nyulnyul",
"nyw": "Nyaw",
"nyx": "Nganyaywana",
"nyy": "Nyakyusa-Ngonde",
"nza": "Tigon Mbembe",
"nzb": "Njebi",
"nzd": "Nzadi",
"nzi": "Nzima",
"nzk": "Nzakara",
"nzm": "Zeme Naga",
"nzs": "New Zealand Sign Language",
"nzu": "Teke-Nzikou",
"nzy": "Nzakambay",
"nzz": "Nanga Dama Dogon",
"oaa": "Orok",
"oac": "Oroch",
"oar": "Old Aramaic (up to 700 BCE); Ancient Aramaic (up to 700 BCE)",
"oav": "Old Avar",
"obi": "Obispeño",
"obk": "Southern Bontok",
"obl": "Oblo",
"obm": "Moabite",
"obo": "Obo Manobo",
"obr": "Old Burmese",
"obt": "Old Breton",
"obu": "Obulom",
"oc": "Occitan (post 1500)",
"oca": "Ocaina",
"och": "Old Chinese",
"ocm": "Old Cham",
"oco": "Old Cornish",
"ocu": "Atzingo Matlatzinca",
"oda": "Odut",
"odk": "Od",
"odt": "Old Dutch",
"odu": "Odual",
"ofo": "Ofo",
"ofs": "Old Frisian",
"ofu": "Efutop",
"ogb": "Ogbia",
"ogc": "Ogbah",
"oge": "Old Georgian",
"ogg": "Ogbogolo",
"ogo": "Khana",
"ogu": "Ogbronuagum",
"oht": "Old Hittite",
"ohu": "Old Hungarian",
"oia": "Oirata",
"oie": "Okolie",
"oin": "Inebu One",
"oj": "Ojibwa",
"ojb": "Northwestern Ojibwa",
"ojc": "Central Ojibwa",
"ojg": "Eastern Ojibwa",
"ojp": "Old Japanese",
"ojs": "Severn Ojibwa",
"ojv": "Ontong Java",
"ojw": "Western Ojibwa",
"oka": "Okanagan",
"okb": "Okobo",
"okc": "Kobo",
"okd": "Okodia",
"oke": "Okpe (Southwestern Edo)",
"okg": "Koko Babangk",
"okh": "Koresh-e Rostam",
"oki": "Okiek",
"okj": "Oko-Juwoi",
"okk": "Kwamtim One",
"okl": "Old Kentish Sign Language",
"okm": "Middle Korean (10th-16th cent.)",
"okn": "Oki-No-Erabu",
"oko": "Old Korean (3rd-9th cent.)",
"okr": "Kirike",
"oks": "Oko-Eni-Osayen",
"oku": "Oku",
"okv": "Orokaiva",
"okx": "Okpe (Northwestern Edo)",
"okz": "Old Khmer",
"ola": "Walungge",
"old": "Mochi",
"ole": "Olekha",
"olk": "Olkol",
"olm": "Oloma",
"olo": "Livvi",
"olr": "Olrat",
"olt": "Old Lithuanian",
"olu": "Kuvale",
"om": "Oromo",
"oma": "Omaha-Ponca",
"omb": "East Ambae",
"omc": "Mochica",
"omg": "Omagua",
"omi": "Omi",
"omk": "Omok",
"oml": "Ombo",
"omn": "Minoan",
"omo": "Utarmbung",
"omp": "Old Manipuri",
"omq": "Oto-Manguean languages",
"omr": "Old Marathi",
"omt": "Omotik",
"omu": "Omurano",
"omv": "Omotic languages",
"omw": "South Tairora",
"omx": "Old Mon",
"omy": "Old Malay",
"ona": "Ona",
"onb": "Lingao",
"one": "Oneida",
"ong": "Olo",
"oni": "Onin",
"onj": "Onjob",
"onk": "Kabore One",
"onn": "Onobasulu",
"ono": "Onondaga",
"onp": "Sartang",
"onr": "Northern One",
"ons": "Ono",
"ont": "Ontenu",
"onu": "Unua",
"onw": "Old Nubian",
"onx": "Onin Based Pidgin",
"ood": "Tohono O'odham",
"oog": "Ong",
"oon": "Önge",
"oor": "Oorlams",
"oos": "Old Ossetic",
"opa": "Okpamheri",
"opk": "Kopkaka",
"opm": "Oksapmin",
"opo": "Opao",
"opt": "Opata",
"opy": "Ofayé",
"or": "Oriya (macrolanguage); Odia (macrolanguage)",
"ora": "Oroha",
"orc": "Orma",
"ore": "Orejón",
"org": "Oring",
"orh": "Oroqen",
"orn": "Orang Kanaq",
"oro": "Orokolo",
"orr": "Oruma",
"ors": "Orang Seletar",
"ort": "Adivasi Oriya",
"oru": "Ormuri",
"orv": "Old Russian",
"orw": "Oro Win",
"orx": "Oro",
"ory": "Odia (individual language); Oriya (individual language)",
"orz": "Ormu",
"os": "Ossetian; Ossetic",
"osa": "Osage",
"osc": "Oscan",
"osi": "Osing",
"osn": "Old Sundanese",
"oso": "Ososo",
"osp": "Old Spanish",
"ost": "Osatu",
"osu": "Southern One",
"osx": "Old Saxon",
"ota": "Ottoman Turkish (1500-1928)",
"otb": "Old Tibetan",
"otd": "Ot Danum",
"ote": "Mezquital Otomi",
"oti": "Oti",
"otk": "Old Turkish",
"otl": "Tilapa Otomi",
"otm": "Eastern Highland Otomi",
"otn": "Tenango Otomi",
"oto": "Otomian languages",
"otq": "Querétaro Otomi",
"otr": "Otoro",
"ots": "Estado de México Otomi",
"ott": "Temoaya Otomi",
"otu": "Otuke",
"otw": "Ottawa",
"otx": "Texcatepec Otomi",
"oty": "Old Tamil",
"otz": "Ixtenco Otomi",
"oua": "Tagargrent",
"oub": "Glio-Oubi",
"oue": "Oune",
"oui": "Old Uighur",
"oum": "Ouma",
"ovd": "Elfdalian; Övdalian",
"owi": "Owiniga",
"owl": "Old Welsh",
"oyb": "Oy",
"oyd": "Oyda",
"oym": "Wayampi",
"oyy": "Oya'oya",
"ozm": "Koonzime",
"pa": "Panjabi; Punjabi",
"paa": "Papuan languages",
"pab": "Parecís",
"pac": "Pacoh",
"pad": "Paumarí",
"pae": "Pagibete",
"paf": "Paranawát",
"pag": "Pangasinan",
"pah": "Tenharim",
"pai": "Pe",
"pak": "Parakanã",
"pal": "Pahlavi",
"pam": "Pampanga; Kapampangan",
"pao": "Northern Paiute",
"pap": "Papiamento",
"paq": "Parya",
"par": "Panamint; Timbisha",
"pas": "Papasena",
"pau": "Palauan",
"pav": "Pakaásnovos",
"paw": "Pawnee",
"pax": "Pankararé",
"pay": "Pech",
"paz": "Pankararú",
"pbb": "Páez",
"pbc": "Patamona",
"pbe": "Mezontla Popoloca",
"pbf": "Coyotepec Popoloca",
"pbg": "Paraujano",
"pbh": "E'ñapa Woromaipu",
"pbi": "Parkwa",
"pbl": "Mak (Nigeria)",
"pbm": "Puebla Mazatec",
"pbn": "Kpasam",
"pbo": "Papel",
"pbp": "Badyara",
"pbr": "Pangwa",
"pbs": "Central Pame",
"pbt": "Southern Pashto",
"pbu": "Northern Pashto",
"pbv": "Pnar",
"pby": "Pyu (Papua New Guinea)",
"pca": "Santa Inés Ahuatempan Popoloca",
"pcb": "Pear",
"pcc": "Bouyei",
"pcd": "Picard",
"pce": "Ruching Palaung",
"pcf": "Paliyan",
"pcg": "Paniya",
"pch": "Pardhan",
"pci": "Duruwa",
"pcj": "Parenga",
"pck": "Paite Chin",
"pcl": "Pardhi",
"pcm": "Nigerian Pidgin",
"pcn": "Piti",
"pcp": "Pacahuara",
"pcw": "Pyapun",
"pda": "Anam",
"pdc": "Pennsylvania German",
"pdi": "Pa Di",
"pdn": "Podena; Fedan",
"pdo": "Padoe",
"pdt": "Plautdietsch",
"pdu": "Kayan",
"pea": "Peranakan Indonesian",
"peb": "Eastern Pomo",
"ped": "Mala (Papua New Guinea)",
"pee": "Taje",
"pef": "Northeastern Pomo",
"peg": "Pengo",
"peh": "Bonan",
"pei": "Chichimeca-Jonaz",
"pej": "Northern Pomo",
"pek": "Penchal",
"pel": "Pekal",
"pem": "Phende",
"peo": "Old Persian (ca. 600-400 B.C.)",
"pep": "Kunja",
"peq": "Southern Pomo",
"pes": "Iranian Persian",
"pev": "Pémono",
"pex": "Petats",
"pey": "Petjo",
"pez": "Eastern Penan",
"pfa": "Pááfang",
"pfe": "Pere",
"pfl": "Pfaelzisch",
"pga": "Sudanese Creole Arabic",
"pgd": "Gāndhārī",
"pgg": "Pangwali",
"pgi": "Pagi",
"pgk": "Rerep",
"pgl": "Primitive Irish",
"pgn": "Paelignian",
"pgs": "Pangseng",
"pgu": "Pagu",
"pgz": "Papua New Guinean Sign Language",
"pha": "Pa-Hng",
"phd": "Phudagi",
"phg": "Phuong",
"phh": "Phukha",
"phi": "Philippine languages",
"phj": "Pahari",
"phk": "Phake",
"phl": "Phalura; Palula",
"phm": "Phimbi",
"phn": "Phoenician",
"pho": "Phunoi",
"phq": "Phana'",
"phr": "Pahari-Potwari",
"pht": "Phu Thai",
"phu": "Phuan",
"phv": "Pahlavani",
"phw": "Phangduwali",
"pi": "Pali",
"pia": "Pima Bajo",
"pib": "Yine",
"pic": "Pinji",
"pid": "Piaroa",
"pie": "Piro",
"pif": "Pingelapese",
"pig": "Pisabo",
"pih": "Pitcairn-Norfolk",
"pij": "Pijao",
"pil": "Yom",
"pim": "Powhatan",
"pin": "Piame",
"pio": "Piapoco",
"pip": "Pero",
"pir": "Piratapuyo",
"pis": "Pijin",
"pit": "Pitta Pitta",
"piu": "Pintupi-Luritja",
"piv": "Pileni; Vaeakau-Taumako",
"piw": "Pimbwe",
"pix": "Piu",
"piy": "Piya-Kwonci",
"piz": "Pije",
"pjt": "Pitjantjatjara",
"pka": "Ardhamāgadhī Prākrit",
"pkb": "Pokomo; Kipfokomo",
"pkc": "Paekche",
"pkg": "Pak-Tong",
"pkh": "Pankhu",
"pkn": "Pakanha",
"pko": "Pökoot",
"pkp": "Pukapuka",
"pkr": "Attapady Kurumba",
"pks": "Pakistan Sign Language",
"pkt": "Maleng",
"pku": "Paku",
"pl": "Polish",
"pla": "Miani",
"plb": "Polonombauk",
"plc": "Central Palawano",
"pld": "Polari",
"ple": "Palu'e",
"plf": "Central Malayo-Polynesian languages",
"plg": "Pilagá",
"plh": "Paulohi",
"plj": "Polci",
"plk": "Kohistani Shina",
"pll": "Shwe Palaung",
"pln": "Palenquero",
"plo": "Oluta Popoluca",
"plq": "Palaic",
"plr": "Palaka Senoufo",
"pls": "San Marcos Tlacoyalco Popoloca; San Marcos Tlalcoyalco Popoloca",
"plt": "Plateau Malagasy",
"plu": "Palikúr",
"plv": "Southwest Palawano",
"plw": "Brooke's Point Palawano",
"ply": "Bolyu",
"plz": "Paluan",
"pma": "Paama",
"pmb": "Pambia",
"pmd": "Pallanganmiddang",
"pme": "Pwaamei",
"pmf": "Pamona",
"pmh": "Māhārāṣṭri Prākrit",
"pmi": "Northern Pumi",
"pmj": "Southern Pumi",
"pmk": "Pamlico",
"pml": "Lingua Franca",
"pmm": "Pomo",
"pmn": "Pam",
"pmo": "Pom",
"pmq": "Northern Pame",
"pmr": "Paynamar",
"pms": "Piemontese",
"pmt": "Tuamotuan",
"pmw": "Plains Miwok",
"pmx": "Poumei Naga",
"pmy": "Papuan Malay",
"pmz": "Southern Pame",
"pna": "Punan Bah-Biau",
"pnb": "Western Panjabi",
"pnc": "Pannei",
"pnd": "Mpinda",
"pne": "Western Penan",
"png": "Pangu; Pongu",
"pnh": "Penrhyn",
"pni": "Aoheng",
"pnj": "Pinjarup",
"pnk": "Paunaka",
"pnl": "Paleni",
"pnm": "Punan Batu 1",
"pnn": "Pinai-Hagahai",
"pno": "Panobo",
"pnp": "Pancana",
"pnq": "Pana (Burkina Faso)",
"pnr": "Panim",
"pns": "Ponosakan",
"pnt": "Pontic",
"pnu": "Jiongnai Bunu",
"pnv": "Pinigura",
"pnw": "Banyjima; Panytyima",
"pnx": "Phong-Kniang",
"pny": "Pinyin",
"pnz": "Pana (Central African Republic)",
"poc": "Poqomam",
"poe": "San Juan Atzingo Popoloca",
"pof": "Poke",
"pog": "Potiguára",
"poh": "Poqomchi'",
"poi": "Highland Popoluca",
"pok": "Pokangá",
"pom": "Southeastern Pomo",
"pon": "Pohnpeian",
"poo": "Central Pomo",
"pop": "Pwapwâ",
"poq": "Texistepec Popoluca",
"pos": "Sayula Popoluca",
"pot": "Potawatomi",
"pov": "Upper Guinea Crioulo",
"pow": "San Felipe Otlaltepec Popoloca",
"pox": "Polabian",
"poy": "Pogolo",
"poz": "Malayo-Polynesian languages",
"ppe": "Papi",
"ppi": "Paipai",
"ppk": "Uma",
"ppl": "Pipil; Nicarao",
"ppm": "Papuma",
"ppn": "Papapana",
"ppo": "Folopa",
"ppp": "Pelende",
"ppq": "Pei",
"pps": "San Luís Temalacayuca Popoloca",
"ppt": "Pare",
"ppu": "Papora",
"pqa": "Pa'a",
"pqe": "Eastern Malayo-Polynesian languages",
"pqm": "Malecite-Passamaquoddy",
"pqw": "Western Malayo-Polynesian languages",
"pra": "Prakrit languages",
"prc": "Parachi",
"prd": "Parsi-Dari",
"pre": "Principense",
"prf": "Paranan",
"prg": "Prussian",
"prh": "Porohanon",
"pri": "Paicî",
"prk": "Parauk",
"prl": "Peruvian Sign Language",
"prm": "Kibiri",
"prn": "Prasuni",
"pro": "Old Provençal (to 1500); Old Occitan (to 1500)",
"prp": "Parsi",
"prq": "Ashéninka Perené",
"prr": "Puri",
"prs": "Dari; Afghan Persian",
"prt": "Phai",
"pru": "Puragi",
"prw": "Parawen",
"prx": "Purik",
"prz": "Providencia Sign Language",
"ps": "Pushto; Pashto",
"psa": "Asue Awyu",
"psc": "Iranian Sign Language; Persian Sign Language",
"psd": "Plains Indian Sign Language",
"pse": "Central Malay",
"psg": "Penang Sign Language",
"psh": "Southwest Pashai; Southwest Pashayi",
"psi": "Southeast Pashai; Southeast Pashayi",
"psl": "Puerto Rican Sign Language",
"psm": "Pauserna",
"psn": "Panasuan",
"pso": "Polish Sign Language",
"psp": "Philippine Sign Language",
"psq": "Pasi",
"psr": "Portuguese Sign Language",
"pss": "Kaulong",
"pst": "Central Pashto",
"psu": "Sauraseni Prākrit",
"psw": "Port Sandwich",
"psy": "Piscataway",
"pt": "Portuguese",
"pta": "Pai Tavytera",
"pth": "Pataxó Hã-Ha-Hãe",
"pti": "Pindiini; Wangkatha",
"ptn": "Patani",
"pto": "Zo'é",
"ptp": "Patep",
"ptq": "Pattapu",
"ptr": "Piamatsina",
"ptt": "Enrekang",
"ptu": "Bambam",
"ptv": "Port Vato",
"ptw": "Pentlatch",
"pty": "Pathiya",
"pua": "Western Highland Purepecha",
"pub": "Purum",
"puc": "Punan Merap",
"pud": "Punan Aput",
"pue": "Puelche",
"puf": "Punan Merah",
"pug": "Phuie",
"pui": "Puinave",
"puj": "Punan Tubu",
"pum": "Puma",
"puo": "Puoc",
"pup": "Pulabu",
"puq": "Puquina",
"pur": "Puruborá",
"put": "Putoh",
"puu": "Punu",
"puw": "Puluwatese",
"pux": "Puare",
"puy": "Purisimeño",
"pwa": "Pawaia",
"pwb": "Panawa",
"pwg": "Gapapaiwa",
"pwi": "Patwin",
"pwm": "Molbog",
"pwn": "Paiwan",
"pwo": "Pwo Western Karen",
"pwr": "Powari",
"pww": "Pwo Northern Karen",
"pxm": "Quetzaltepec Mixe",
"pye": "Pye Krumen",
"pym": "Fyam",
"pyn": "Poyanáwa",
"pys": "Paraguayan Sign Language; Lengua de Señas del Paraguay",
"pyu": "Puyuma",
"pyx": "Pyu (Myanmar)",
"pyy": "Pyen",
"pzh": "Pazeh",
"pzn": "Jejara Naga; Para Naga",
"qu": "Quechua",
"qua": "Quapaw",
"qub": "Huallaga Huánuco Quechua",
"quc": "K'iche'; Quiché",
"qud": "Calderón Highland Quichua",
"quf": "Lambayeque Quechua",
"qug": "Chimborazo Highland Quichua",
"quh": "South Bolivian Quechua",
"qui": "Quileute",
"quk": "Chachapoyas Quechua",
"qul": "North Bolivian Quechua",
"qum": "Sipacapense",
"qun": "Quinault",
"qup": "Southern Pastaza Quechua",
"quq": "Quinqui",
"qur": "Yanahuanca Pasco Quechua",
"qus": "Santiago del Estero Quichua",
"quv": "Sacapulteco",
"quw": "Tena Lowland Quichua",
"qux": "Yauyos Quechua",
"quy": "Ayacucho Quechua",
"quz": "Cusco Quechua",
"qva": "Ambo-Pasco Quechua",
"qvc": "Cajamarca Quechua",
"qve": "Eastern Apurímac Quechua",
"qvh": "Huamalíes-Dos de Mayo Huánuco Quechua",
"qvi": "Imbabura Highland Quichua",
"qvj": "Loja Highland Quichua",
"qvl": "Cajatambo North Lima Quechua",
"qvm": "Margos-Yarowilca-Lauricocha Quechua",
"qvn": "North Junín Quechua",
"qvo": "Napo Lowland Quechua",
"qvp": "Pacaraos Quechua",
"qvs": "San Martín Quechua",
"qvw": "Huaylla Wanca Quechua",
"qvy": "Queyu",
"qvz": "Northern Pastaza Quichua",
"qwa": "Corongo Ancash Quechua",
"qwc": "Classical Quechua",
"qwe": "Quechuan (family)",
"qwh": "Huaylas Ancash Quechua",
"qwm": "Kuman (Russia)",
"qws": "Sihuas Ancash Quechua",
"qwt": "Kwalhioqua-Tlatskanai",
"qxa": "Chiquián Ancash Quechua",
"qxc": "Chincha Quechua",
"qxh": "Panao Huánuco Quechua",
"qxl": "Salasaca Highland Quichua",
"qxn": "Northern Conchucos Ancash Quechua",
"qxo": "Southern Conchucos Ancash Quechua",
"qxp": "Puno Quechua",
"qxq": "Qashqa'i",
"qxr": "Cañar Highland Quichua",
"qxs": "Southern Qiang",
"qxt": "Santa Ana de Tusi Pasco Quechua",
"qxu": "Arequipa-La Unión Quechua",
"qxw": "Jauja Wanca Quechua",
"qya": "Quenya",
"qyp": "Quiripi",
"raa": "Dungmali",
"rab": "Camling",
"rac": "Rasawa",
"rad": "Rade",
"raf": "Western Meohang",
"rag": "Logooli; Lulogooli",
"rah": "Rabha",
"rai": "Ramoaaina",
"raj": "Rajasthani",
"rak": "Tulu-Bohuai",
"ral": "Ralte",
"ram": "Canela",
"ran": "Riantana",
"rao": "Rao",
"rap": "Rapanui",
"raq": "Saam",
"rar": "Rarotongan; Cook Islands Maori",
"ras": "Tegali",
"rat": "Razajerdi",
"rau": "Raute",
"rav": "Sampang",
"raw": "Rawang",
"rax": "Rang",
"ray": "Rapa",
"raz": "Rahambuu",
"rbb": "Rumai Palaung",
"rbk": "Northern Bontok",
"rbl": "Miraya Bikol",
"rbp": "Barababaraba",
"rcf": "Réunion Creole French",
"rdb": "Rudbari",
"rea": "Rerau",
"reb": "Rembong",
"ree": "Rejang Kayan",
"reg": "Kara (Tanzania)",
"rei": "Reli",
"rej": "Rejang",
"rel": "Rendille",
"rem": "Remo",
"ren": "Rengao",
"rer": "Rer Bare",
"res": "Reshe",
"ret": "Retta",
"rey": "Reyesano",
"rga": "Roria",
"rge": "Romano-Greek",
"rgk": "Rangkas",
"rgn": "Romagnol",
"rgr": "Resígaro",
"rgs": "Southern Roglai",
"rgu": "Ringgou",
"rhg": "Rohingya",
"rhp": "Yahang",
"ria": "Riang (India)",
"rib": "Bribri Sign Language",
"rif": "Tarifit",
"ril": "Riang Lang; Riang (Myanmar)",
"rim": "Nyaturu",
"rin": "Nungu",
"rir": "Ribun",
"rit": "Ritharrngu",
"riu": "Riung",
"rjg": "Rajong",
"rji": "Raji",
"rjs": "Rajbanshi",
"rka": "Kraol",
"rkb": "Rikbaktsa",
"rkh": "Rakahanga-Manihiki",
"rki": "Rakhine",
"rkm": "Marka",
"rkt": "Rangpuri; Kamta",
"rkw": "Arakwal",
"rm": "Romansh",
"rma": "Rama",
"rmb": "Rembarrnga",
"rmc": "Carpathian Romani",
"rmd": "Traveller Danish",
"rme": "Angloromani",
"rmf": "Kalo Finnish Romani",
"rmg": "Traveller Norwegian",
"rmh": "Murkim",
"rmi": "Lomavren",
"rmk": "Romkun",
"rml": "Baltic Romani",
"rmm": "Roma",
"rmn": "Balkan Romani",
"rmo": "Sinte Romani",
"rmp": "Rempi",
"rmq": "Caló",
"rms": "Romanian Sign Language",
"rmt": "Domari",
"rmu": "Tavringer Romani",
"rmv": "Romanova",
"rmw": "Welsh Romani",
"rmx": "Romam",
"rmy": "Vlax Romani",
"rmz": "Marma",
"rn": "Rundi",
"rnb": "Brunca Sign Language",
"rnd": "Ruund",
"rng": "Ronga",
"rnl": "Ranglong",
"rnn": "Roon",
"rnp": "Rongpo",
"rnr": "Nari Nari",
"rnw": "Rungwa",
"ro": "Romanian; Moldavian; Moldovan",
"roa": "Romance languages",
"rob": "Tae'",
"roc": "Cacgia Roglai",
"rod": "Rogo",
"roe": "Ronji",
"rof": "Rombo",
"rog": "Northern Roglai",
"rol": "Romblomanon",
"rom": "Romany",
"roo": "Rotokas",
"rop": "Kriol",
"ror": "Rongga",
"rou": "Runga",
"row": "Dela-Oenale",
"rpn": "Repanbitip",
"rpt": "Rapting",
"rri": "Ririo",
"rro": "Waima",
"rrt": "Arritinngithigh",
"rsb": "Romano-Serbian",
"rsk": "Ruthenian; Rusyn",
"rsl": "Russian Sign Language",
"rsm": "Miriwoong Sign Language",
"rsn": "Rwandan Sign Language",
"rtc": "Rungtu Chin",
"rth": "Ratahan",
"rtm": "Rotuman",
"rts": "Yurats",
"rtw": "Rathawi",
"ru": "Russian",
"rub": "Gungu",
"ruc": "Ruuli",
"rue": "Rusyn",
"ruf": "Luguru",
"rug": "Roviana",
"ruh": "Ruga",
"rui": "Rufiji",
"ruk": "Che",
"ruo": "Istro Romanian",
"rup": "Macedo-Romanian; Aromanian; Arumanian",
"ruq": "Megleno Romanian",
"rut": "Rutul",
"ruu": "Lanas Lobu",
"ruy": "Mala (Nigeria)",
"ruz": "Ruma",
"rw": "Kinyarwanda",
"rwa": "Rawo",
"rwk": "Rwa",
"rwl": "Ruwila",
"rwm": "Amba (Uganda)",
"rwo": "Rawa",
"rwr": "Marwari (India)",
"rxd": "Ngardi",
"rxw": "Karuwali; Garuwali",
"ryn": "Northern Amami-Oshima",
"rys": "Yaeyama",
"ryu": "Central Okinawan",
"rzh": "Rāziḥī",
"sa": "Sanskrit",
"saa": "Saba",
"sab": "Buglere",
"sac": "Meskwaki",
"sad": "Sandawe",
"sae": "Sabanê",
"saf": "Safaliba",
"sah": "Yakut",
"sai": "South American Indian languages",
"saj": "Sahu",
"sak": "Sake",
"sal": "Salishan languages",
"sam": "Samaritan Aramaic",
"sao": "Sause",
"saq": "Samburu",
"sar": "Saraveca",
"sas": "Sasak",
"sat": "Santali",
"sau": "Saleman",
"sav": "Saafi-Saafi",
"saw": "Sawi",
"sax": "Sa",
"say": "Saya",
"saz": "Saurashtra",
"sba": "Ngambay",
"sbb": "Simbo",
"sbc": "Kele (Papua New Guinea)",
"sbd": "Southern Samo",
"sbe": "Saliba",
"sbf": "Chabu; Shabo",
"sbg": "Seget",
"sbh": "Sori-Harengan",
"sbi": "Seti",
"sbj": "Surbakhal",
"sbk": "Safwa",
"sbl": "Botolan Sambal",
"sbm": "Sagala",
"sbn": "Sindhi Bhil",
"sbo": "Sabüm",
"sbp": "Sangu (Tanzania)",
"sbq": "Sileibi",
"sbr": "Sembakung Murut",
"sbs": "Subiya",
"sbt": "Kimki",
"sbu": "Stod Bhoti",
"sbv": "Sabine",
"sbw": "Simba",
"sbx": "Seberuang",
"sby": "Soli",
"sbz": "Sara Kaba",
"sc": "Sardinian",
"scb": "Chut",
"sce": "Dongxiang",
"scf": "San Miguel Creole French",
"scg": "Sanggau",
"sch": "Sakachep",
"sci": "Sri Lankan Creole Malay",
"sck": "Sadri",
"scl": "Shina",
"scn": "Sicilian",
"sco": "Scots",
"scp": "Hyolmo; Helambu Sherpa",
"scq": "Sa'och",
"scs": "North Slavey",
"sct": "Southern Katang",
"scu": "Shumcho",
"scv": "Sheni",
"scw": "Sha",
"scx": "Sicel",
"sd": "Sindhi",
"sda": "Toraja-Sa'dan",
"sdb": "Shabak",
"sdc": "Sassarese Sardinian",
"sde": "Surubu",
"sdf": "Sarli",
"sdg": "Savi",
"sdh": "Southern Kurdish",
"sdj": "Suundi",
"sdk": "Sos Kundi",
"sdl": "Saudi Arabian Sign Language",
"sdn": "Gallurese Sardinian",
"sdo": "Bukar-Sadung Bidayuh",
"sdp": "Sherdukpen",
"sdq": "Semandang",
"sdr": "Oraon Sadri",
"sds": "Sened",
"sdt": "Shuadit",
"sdu": "Sarudu",
"sdv": "Eastern Sudanic languages",
"sdx": "Sibu Melanau",
"sdz": "Sallands",
"se": "Northern Sami",
"sea": "Semai",
"seb": "Shempire Senoufo",
"sec": "Sechelt",
"sed": "Sedang",
"see": "Seneca",
"sef": "Cebaara Senoufo",
"seg": "Segeju",
"seh": "Sena",
"sei": "Seri",
"sej": "Sene",
"sek": "Sekani",
"sel": "Selkup",
"sem": "Semitic languages",
"sen": "Nanerigé Sénoufo",
"seo": "Suarmin",
"sep": "Sìcìté Sénoufo",
"seq": "Senara Sénoufo",
"ser": "Serrano",
"ses": "Koyraboro Senni Songhai",
"set": "Sentani",
"seu": "Serui-Laut",
"sev": "Nyarafolo Senoufo",
"sew": "Sewa Bay",
"sey": "Secoya",
"sez": "Senthang Chin",
"sfb": "Langue des signes de Belgique Francophone; French Belgian Sign Language",
"sfe": "Eastern Subanen",
"sfm": "Small Flowery Miao",
"sfs": "South African Sign Language",
"sfw": "Sehwi",
"sg": "Sango",
"sga": "Old Irish (to 900)",
"sgb": "Mag-antsi Ayta",
"sgc": "Kipsigis",
"sgd": "Surigaonon",
"sge": "Segai",
"sgg": "Swiss-German Sign Language",
"sgh": "Shughni",
"sgi": "Suga",
"sgj": "Surgujia",
"sgk": "Sangkong",
"sgm": "Singa",
"sgn": "Sign languages",
"sgp": "Singpho",
"sgr": "Sangisari",
"sgs": "Samogitian",
"sgt": "Brokpake",
"sgu": "Salas",
"sgw": "Sebat Bet Gurage",
"sgx": "Sierra Leone Sign Language",
"sgy": "Sanglechi",
"sgz": "Sursurunga",
"sh": "Serbo-Croatian",
"sha": "Shall-Zwall",
"shb": "Ninam",
"shc": "Sonde",
"shd": "Kundal Shahi",
"she": "Sheko",
"shg": "Shua",
"shh": "Shoshoni",
"shi": "Tachelhit",
"shj": "Shatt",
"shk": "Shilluk",
"shl": "Shendu",
"shm": "Shahrudi",
"shn": "Shan",
"sho": "Shanga",
"shp": "Shipibo-Conibo",
"shq": "Sala",
"shr": "Shi",
"shs": "Shuswap",
"sht": "Shasta",
"shu": "Chadian Arabic",
"shv": "Shehri",
"shw": "Shwai",
"shx": "She",
"shy": "Tachawit",
"shz": "Syenara Senoufo",
"si": "Sinhala; Sinhalese",
"sia": "Akkala Sami",
"sib": "Sebop",
"sid": "Sidamo",
"sie": "Simaa",
"sif": "Siamou",
"sig": "Paasaal",
"sih": "Zire; Sîshëë",
"sii": "Shom Peng",
"sij": "Numbami",
"sik": "Sikiana",
"sil": "Tumulung Sisaala",
"sim": "Mende (Papua New Guinea)",
"sio": "Siouan languages",
"sip": "Sikkimese",
"siq": "Sonia",
"sir": "Siri",
"sis": "Siuslaw",
"sit": "Sino-Tibetan languages",
"siu": "Sinagen",
"siv": "Sumariup",
"siw": "Siwai",
"six": "Sumau",
"siy": "Sivandi",
"siz": "Siwi",
"sja": "Epena",
"sjb": "Sajau Basap",
"sjd": "Kildin Sami",
"sje": "Pite Sami",
"sjg": "Assangori",
"sjk": "Kemi Sami",
"sjl": "Sajalong; Miji",
"sjm": "Mapun",
"sjn": "Sindarin",
"sjo": "Xibe",
"sjp": "Surjapuri",
"sjr": "Siar-Lak",
"sjs": "Senhaja De Srair",
"sjt": "Ter Sami",
"sju": "Ume Sami",
"sjw": "Shawnee",
"sk": "Slovak",
"ska": "Skagit",
"skb": "Saek",
"skc": "Ma Manda",
"skd": "Southern Sierra Miwok",
"ske": "Seke (Vanuatu)",
"skf": "Sakirabiá",
"skg": "Sakalava Malagasy",
"skh": "Sikule",
"ski": "Sika",
"skj": "Seke (Nepal)",
"skm": "Kutong",
"skn": "Kolibugan Subanon",
"sko": "Seko Tengah",
"skp": "Sekapan",
"skq": "Sininkere",
"skr": "Saraiki; Seraiki",
"sks": "Maia",
"skt": "Sakata",
"sku": "Sakao",
"skv": "Skou",
"skw": "Skepi Creole Dutch",
"skx": "Seko Padang",
"sky": "Sikaiana",
"skz": "Sekar",
"sl": "Slovenian",
"sla": "Slavic languages",
"slc": "Sáliba",
"sld": "Sissala",
"sle": "Sholaga",
"slf": "Swiss-Italian Sign Language",
"slg": "Selungai Murut",
"slh": "Southern Puget Sound Salish",
"sli": "Lower Silesian",
"slj": "Salumá",
"sll": "Salt-Yui",
"slm": "Pangutaran Sama",
"sln": "Salinan",
"slp": "Lamaholot",
"slq": "Salchuq",
"slr": "Salar",
"sls": "Singapore Sign Language",
"slt": "Sila",
"slu": "Selaru",
"slw": "Sialum",
"slx": "Salampasu",
"sly": "Selayar",
"slz": "Ma'ya",
"sm": "Samoan",
"sma": "Southern Sami",
"smb": "Simbari",
"smc": "Som",
"smf": "Auwe",
"smg": "Simbali",
"smh": "Samei",
"smi": "Sami languages",
"smj": "Lule Sami",
"smk": "Bolinao",
"sml": "Central Sama",
"smm": "Musasa",
"smn": "Inari Sami",
"smp": "Samaritan",
"smq": "Samo",
"smr": "Simeulue",
"sms": "Skolt Sami",
"smt": "Simte",
"smu": "Somray",
"smv": "Samvedi",
"smw": "Sumbawa",
"smx": "Samba",
"smy": "Semnani",
"smz": "Simeku",
"sn": "Shona",
"snc": "Sinaugoro",
"sne": "Bau Bidayuh",
"snf": "Noon",
"sng": "Sanga (Democratic Republic of Congo)",
"sni": "Sensi",
"snj": "Riverain Sango",
"snk": "Soninke",
"snl": "Sangil",
"snm": "Southern Ma'di",
"snn": "Siona",
"sno": "Snohomish",
"snp": "Siane",
"snq": "Sangu (Gabon)",
"snr": "Sihan",
"sns": "South West Bay; Nahavaq",
"snu": "Senggi; Viid",
"snv": "Sa'ban",
"snw": "Selee",
"snx": "Sam",
"sny": "Saniyo-Hiyewe",
"snz": "Kou",
"so": "Somali",
"soa": "Thai Song",
"sob": "Sobei",
"soc": "So (Democratic Republic of Congo)",
"sod": "Songoora",
"soe": "Songomeno",
"sog": "Sogdian",
"soh": "Aka",
"soi": "Sonha",
"soj": "Soi",
"sok": "Sokoro",
"sol": "Solos",
"son": "Songhai languages",
"soo": "Songo",
"sop": "Songe",
"soq": "Kanasi",
"sor": "Somrai",
"sos": "Seeku",
"sou": "Southern Thai",
"sov": "Sonsorol",
"sow": "Sowanda",
"sox": "Swo",
"soy": "Miyobe",
"soz": "Temi",
"spb": "Sepa (Indonesia)",
"spc": "Sapé",
"spd": "Saep",
"spe": "Sepa (Papua New Guinea)",
"spg": "Sian",
"spi": "Saponi",
"spk": "Sengo",
"spl": "Selepet",
"spm": "Akukem",
"spn": "Sanapaná",
"spo": "Spokane",
"spp": "Supyire Senoufo",
"spq": "Loreto-Ucayali Spanish",
"spr": "Saparua",
"sps": "Saposa",
"spt": "Spiti Bhoti",
"spu": "Sapuan",
"spv": "Sambalpuri; Kosli",
"spx": "South Picene",
"spy": "Sabaot",
"sq": "Albanian",
"sqa": "Shama-Sambuga",
"sqh": "Shau",
"sqj": "Albanian languages",
"sqk": "Albanian Sign Language",
"sqm": "Suma",
"sqn": "Susquehannock",
"sqo": "Sorkhei",
"sqq": "Sou",
"sqr": "Siculo Arabic",
"sqs": "Sri Lankan Sign Language",
"sqt": "Soqotri",
"squ": "Squamish",
"sqx": "Kufr Qassem Sign Language (KQSL)",
"sr": "Serbian",
"sra": "Saruga",
"srb": "Sora",
"src": "Logudorese Sardinian",
"sre": "Sara",
"srf": "Nafi",
"srg": "Sulod",
"srh": "Sarikoli",
"sri": "Siriano",
"srk": "Serudung Murut",
"srl": "Isirawa",
"srm": "Saramaccan",
"srn": "Sranan Tongo",
"sro": "Campidanese Sardinian",
"srq": "Sirionó",
"srr": "Serer",
"srs": "Sarsi",
"srt": "Sauri",
"sru": "Suruí",
"srv": "Southern Sorsoganon",
"srw": "Serua",
"srx": "Sirmauri",
"sry": "Sera",
"srz": "Shahmirzadi",
"ss": "Swati",
"ssa": "Nilo-Saharan languages",
"ssb": "Southern Sama",
"ssc": "Suba-Simbiti",
"ssd": "Siroi",
"sse": "Balangingi; Bangingih Sama",
"ssf": "Thao",
"ssg": "Seimat",
"ssh": "Shihhi Arabic",
"ssi": "Sansi",
"ssj": "Sausi",
"ssk": "Sunam",
"ssl": "Western Sisaala",
"ssm": "Semnam",
"ssn": "Waata",
"sso": "Sissano",
"ssp": "Spanish Sign Language",
"ssq": "So'a",
"ssr": "Swiss-French Sign Language",
"sss": "Sô",
"sst": "Sinasina",
"ssu": "Susuami",
"ssv": "Shark Bay",
"ssx": "Samberigi",
"ssy": "Saho",
"ssz": "Sengseng",
"st": "Southern Sotho",
"sta": "Settla",
"stb": "Northern Subanen",
"std": "Sentinel",
"ste": "Liana-Seti",
"stf": "Seta",
"stg": "Trieng",
"sth": "Shelta",
"sti": "Bulo Stieng",
"stj": "Matya Samo",
"stk": "Arammba",
"stl": "Stellingwerfs",
"stm": "Setaman",
"stn": "Owa",
"sto": "Stoney",
"stp": "Southeastern Tepehuan",
"stq": "Saterfriesisch",
"str": "Straits Salish",
"sts": "Shumashti",
"stt": "Budeh Stieng",
"stu": "Samtao",
"stv": "Silt'e",
"stw": "Satawalese",
"sty": "Siberian Tatar",
"su": "Sundanese",
"sua": "Sulka",
"sub": "Suku",
"suc": "Western Subanon",
"sue": "Suena",
"sug": "Suganga",
"sui": "Suki",
"suj": "Shubi",
"suk": "Sukuma",
"suo": "Bouni",
"suq": "Tirmaga-Chai Suri; Suri",
"sur": "Mwaghavul",
"sus": "Susu",
"sut": "Subtiaba",
"suv": "Puroik",
"suw": "Sumbwa",
"sux": "Sumerian",
"suy": "Suyá",
"suz": "Sunwar",
"sv": "Swedish",
"sva": "Svan",
"svb": "Ulau-Suain",
"svc": "Vincentian Creole English",
"sve": "Serili",
"svk": "Slovakian Sign Language",
"svm": "Slavomolisano",
"svs": "Savosavo",
"svx": "Skalvian",
"sw": "Swahili (macrolanguage)",
"swb": "Maore Comorian",
"swc": "Congo Swahili",
"swf": "Sere",
"swg": "Swabian",
"swh": "Swahili (individual language); Kiswahili",
"swi": "Sui",
"swj": "Sira",
"swk": "Malawi Sena",
"swl": "Swedish Sign Language",
"swm": "Samosa",
"swn": "Sawknah",
"swo": "Shanenawa",
"swp": "Suau",
"swq": "Sharwa",
"swr": "Saweru",
"sws": "Seluwasan",
"swt": "Sawila",
"swu": "Suwawa",
"swv": "Shekhawati",
"sww": "Sowa",
"swx": "Suruahá",
"swy": "Sarua",
"sxb": "Suba",
"sxc": "Sicanian",
"sxe": "Sighu",
"sxg": "Shuhi; Shixing",
"sxk": "Southern Kalapuya",
"sxl": "Selian",
"sxm": "Samre",
"sxn": "Sangir",
"sxo": "Sorothaptic",
"sxr": "Saaroa",
"sxs": "Sasaru",
"sxu": "Upper Saxon",
"sxw": "Saxwe Gbe",
"sya": "Siang",
"syb": "Central Subanen",
"syc": "Classical Syriac",
"syd": "Samoyedic languages",
"syi": "Seki",
"syk": "Sukur",
"syl": "Sylheti",
"sym": "Maya Samo",
"syn": "Senaya",
"syo": "Suoy",
"syr": "Syriac",
"sys": "Sinyar",
"syw": "Kagate",
"syx": "Samay",
"syy": "Al-Sayyid Bedouin Sign Language",
"sza": "Semelai",
"szb": "Ngalum",
"szc": "Semaq Beri",
"szd": "Seru",
"sze": "Seze",
"szg": "Sengele",
"szl": "Silesian",
"szn": "Sula",
"szp": "Suabo",
"szs": "Solomon Islands Sign Language",
"szv": "Isu (Fako Division)",
"szw": "Sawai",
"szy": "Sakizaya",
"ta": "Tamil",
"taa": "Lower Tanana",
"tab": "Tabassaran",
"tac": "Lowland Tarahumara",
"tad": "Tause",
"tae": "Tariana",
"taf": "Tapirapé",
"tag": "Tagoi",
"tai": "Tai languages",
"taj": "Eastern Tamang",
"tak": "Tala",
"tal": "Tal",
"tan": "Tangale",
"tao": "Yami",
"tap": "Taabwa",
"taq": "Tamasheq",
"tar": "Central Tarahumara",
"tas": "Tay Boi",
"tau": "Upper Tanana",
"tav": "Tatuyo",
"taw": "Tai",
"tax": "Tamki",
"tay": "Atayal",
"taz": "Tocho",
"tba": "Aikanã",
"tbc": "Takia",
"tbd": "Kaki Ae",
"tbe": "Tanimbili",
"tbf": "Mandara",
"tbg": "North Tairora",
"tbh": "Dharawal; Thurawal",
"tbi": "Gaam",
"tbj": "Tiang",
"tbk": "Calamian Tagbanwa",
"tbl": "Tboli",
"tbm": "Tagbu",
"tbn": "Barro Negro Tunebo",
"tbo": "Tawala",
"tbp": "Taworta; Diebroud",
"tbq": "Tibeto-Burman languages",
"tbr": "Tumtum",
"tbs": "Tanguat",
"tbt": "Tembo (Kitembo)",
"tbu": "Tubar",
"tbv": "Tobo",
"tbw": "Tagbanwa",
"tbx": "Kapin",
"tby": "Tabaru",
"tbz": "Ditammari",
"tca": "Ticuna",
"tcb": "Tanacross",
"tcc": "Datooga",
"tcd": "Tafi",
"tce": "Southern Tutchone",
"tcf": "Malinaltepec Me'phaa; Malinaltepec Tlapanec",
"tcg": "Tamagario",
"tch": "Turks And Caicos Creole English",
"tci": "Wára",
"tck": "Tchitchege",
"tcl": "Taman (Myanmar)",
"tcm": "Tanahmerah",
"tcn": "Tichurong",
"tco": "Taungyo",
"tcp": "Tawr Chin",
"tcq": "Kaiy",
"tcs": "Torres Strait Creole; Yumplatok",
"tct": "T'en",
"tcu": "Southeastern Tarahumara",
"tcw": "Tecpatlán Totonac",
"tcx": "Toda",
"tcy": "Tulu",
"tcz": "Thado Chin",
"tda": "Tagdal",
"tdb": "Panchpargania",
"tdc": "Emberá-Tadó",
"tdd": "Tai Nüa",
"tde": "Tiranige Diga Dogon",
"tdf": "Talieng",
"tdg": "Western Tamang",
"tdh": "Thulung",
"tdi": "Tomadino",
"tdj": "Tajio",
"tdk": "Tambas",
"tdl": "Sur",
"tdm": "Taruma",
"tdn": "Tondano",
"tdo": "Teme",
"tdq": "Tita",
"tdr": "Todrah",
"tds": "Doutai",
"tdt": "Tetun Dili",
"tdv": "Toro",
"tdx": "Tandroy-Mahafaly Malagasy",
"tdy": "Tadyawan",
"te": "Telugu",
"tea": "Temiar",
"teb": "Tetete",
"tec": "Terik",
"ted": "Tepo Krumen",
"tee": "Huehuetla Tepehua",
"tef": "Teressa",
"teg": "Teke-Tege",
"teh": "Tehuelche",
"tei": "Torricelli",
"tek": "Ibali Teke",
"tem": "Timne",
"ten": "Tama (Colombia)",
"teo": "Teso",
"tep": "Tepecano",
"teq": "Temein",
"ter": "Tereno",
"tes": "Tengger",
"tet": "Tetum",
"teu": "Soo",
"tev": "Teor",
"tew": "Tewa (USA)",
"tex": "Tennet",
"tey": "Tulishi",
"tez": "Tetserret",
"tfi": "Tofin Gbe",
"tfn": "Tanaina",
"tfo": "Tefaro",
"tfr": "Teribe",
"tft": "Ternate",
"tg": "Tajik",
"tga": "Sagalla",
"tgb": "Tobilung",
"tgc": "Tigak",
"tgd": "Ciwogai",
"tge": "Eastern Gorkha Tamang",
"tgf": "Chalikha",
"tgh": "Tobagonian Creole English",
"tgi": "Lawunuia",
"tgj": "Tagin",
"tgn": "Tandaganon",
"tgo": "Sudest",
"tgp": "Tangoa",
"tgq": "Tring",
"tgr": "Tareng",
"tgs": "Nume",
"tgt": "Central Tagbanwa",
"tgu": "Tanggu",
"tgv": "Tingui-Boto",
"tgw": "Tagwana Senoufo",
"tgx": "Tagish",
"tgy": "Togoyo",
"tgz": "Tagalaka",
"th": "Thai",
"thd": "Kuuk Thaayorre; Thayore",
"the": "Chitwania Tharu",
"thf": "Thangmi",
"thh": "Northern Tarahumara",
"thi": "Tai Long",
"thk": "Tharaka; Kitharaka",
"thl": "Dangaura Tharu",
"thm": "Aheu",
"thn": "Thachanadan",
"thp": "Thompson",
"thq": "Kochila Tharu",
"thr": "Rana Tharu",
"ths": "Thakali",
"tht": "Tahltan",
"thu": "Thuri",
"thv": "Tahaggart Tamahaq",
"thy": "Tha",
"thz": "Tayart Tamajeq",
"ti": "Tigrinya",
"tia": "Tidikelt Tamazight",
"tic": "Tira",
"tif": "Tifal",
"tig": "Tigre",
"tih": "Timugon Murut",
"tii": "Tiene",
"tij": "Tilung",
"tik": "Tikar",
"til": "Tillamook",
"tim": "Timbe",
"tin": "Tindi",
"tio": "Teop",
"tip": "Trimuris",
"tiq": "Tiéfo",
"tis": "Masadiit Itneg",
"tit": "Tinigua",
"tiu": "Adasen",
"tiv": "Tiv",
"tiw": "Tiwi",
"tix": "Southern Tiwa",
"tiy": "Tiruray",
"tiz": "Tai Hongjin",
"tja": "Tajuasohn",
"tjg": "Tunjung",
"tji": "Northern Tujia",
"tjj": "Tjungundji",
"tjl": "Tai Laing",
"tjm": "Timucua",
"tjn": "Tonjon",
"tjo": "Temacine Tamazight",
"tjp": "Tjupany",
"tjs": "Southern Tujia",
"tju": "Tjurruru",
"tjw": "Djabwurrung",
"tk": "Turkmen",
"tka": "Truká",
"tkb": "Buksa",
"tkd": "Tukudede",
"tke": "Takwane",
"tkf": "Tukumanféd",
"tkg": "Tesaka Malagasy",
"tkl": "Tokelau",
"tkm": "Takelma",
"tkn": "Toku-No-Shima",
"tkp": "Tikopia",
"tkq": "Tee",
"tkr": "Tsakhur",
"tks": "Takestani",
"tkt": "Kathoriya Tharu",
"tku": "Upper Necaxa Totonac",
"tkv": "Mur Pano",
"tkw": "Teanu",
"tkx": "Tangko",
"tkz": "Takua",
"tl": "Tagalog",
"tla": "Southwestern Tepehuan",
"tlb": "Tobelo",
"tlc": "Yecuatla Totonac",
"tld": "Talaud",
"tlf": "Telefol",
"tlg": "Tofanma",
"tlh": "Klingon; tlhIngan Hol",
"tli": "Tlingit",
"tlj": "Talinga-Bwisi",
"tlk": "Taloki",
"tll": "Tetela",
"tlm": "Tolomako",
"tln": "Talondo'",
"tlo": "Talodi",
"tlp": "Filomena Mata-Coahuitlán Totonac",
"tlq": "Tai Loi",
"tlr": "Talise",
"tls": "Tambotalo",
"tlt": "Sou Nama; Teluti",
"tlu": "Tulehu",
"tlv": "Taliabu",
"tlx": "Khehek",
"tly": "Talysh",
"tma": "Tama (Chad)",
"tmb": "Katbol; Avava",
"tmc": "Tumak",
"tmd": "Haruai",
"tme": "Tremembé",
"tmf": "Toba-Maskoy",
"tmg": "Ternateño",
"tmh": "Tamashek",
"tmi": "Tutuba",
"tmj": "Samarokena",
"tmk": "Northwestern Tamang",
"tml": "Tamnim Citak",
"tmm": "Tai Thanh",
"tmn": "Taman (Indonesia)",
"tmo": "Temoq",
"tmq": "Tumleo",
"tmr": "Jewish Babylonian Aramaic (ca. 200-1200 CE)",
"tms": "Tima",
"tmt": "Tasmate",
"tmu": "Iau",
"tmv": "Tembo (Motembo)",
"tmw": "Temuan",
"tmy": "Tami",
"tmz": "Tamanaku",
"tn": "Tswana",
"tna": "Tacana",
"tnb": "Western Tunebo",
"tnc": "Tanimuca-Retuarã",
"tnd": "Angosturas Tunebo",
"tng": "Tobanga",
"tnh": "Maiani",
"tni": "Tandia",
"tnk": "Kwamera",
"tnl": "Lenakel",
"tnm": "Tabla",
"tnn": "North Tanna",
"tno": "Toromono",
"tnp": "Whitesands",
"tnq": "Taino",
"tnr": "Ménik",
"tns": "Tenis",
"tnt": "Tontemboan",
"tnu": "Tay Khang",
"tnv": "Tangchangya",
"tnw": "Tonsawang",
"tnx": "Tanema",
"tny": "Tongwe",
"tnz": "Ten'edn",
"to": "Tonga (Tonga Islands)",
"tob": "Toba",
"toc": "Coyutla Totonac",
"tod": "Toma",
"tof": "Gizrra",
"tog": "Tonga (Nyasa)",
"toh": "Gitonga",
"toi": "Tonga (Zambia)",
"toj": "Tojolabal",
"tok": "Toki Pona",
"tol": "Tolowa",
"tom": "Tombulu",
"too": "Xicotepec De Juárez Totonac",
"top": "Papantla Totonac",
"toq": "Toposa",
"tor": "Togbo-Vara Banda",
"tos": "Highland Totonac",
"tou": "Tho",
"tov": "Upper Taromi",
"tow": "Jemez",
"tox": "Tobian",
"toy": "Topoiyo",
"toz": "To",
"tpa": "Taupota",
"tpc": "Azoyú Me'phaa; Azoyú Tlapanec",
"tpe": "Tippera",
"tpf": "Tarpia",
"tpg": "Kula",
"tpi": "Tok Pisin",
"tpj": "Tapieté",
"tpk": "Tupinikin",
"tpl": "Tlacoapa Me'phaa; Tlacoapa Tlapanec",
"tpm": "Tampulma",
"tpn": "Tupinambá",
"tpo": "Tai Pao",
"tpp": "Pisaflores Tepehua",
"tpq": "Tukpa",
"tpr": "Tuparí",
"tpt": "Tlachichilco Tepehua",
"tpu": "Tampuan",
"tpv": "Tanapag",
"tpw": "Tupí",
"tpx": "Acatepec Me'phaa; Acatepec Tlapanec",
"tpy": "Trumai",
"tpz": "Tinputz",
"tqb": "Tembé",
"tql": "Lehali",
"tqm": "Turumsa",
"tqn": "Tenino",
"tqo": "Toaripi",
"tqp": "Tomoip",
"tqq": "Tunni",
"tqr": "Torona",
"tqt": "Western Totonac",
"tqu": "Touo",
"tqw": "Tonkawa",
"tr": "Turkish",
"tra": "Tirahi",
"trb": "Terebu",
"trc": "Copala Triqui",
"trd": "Turi",
"tre": "East Tarangan",
"trf": "Trinidadian Creole English",
"trg": "Lishán Didán",
"trh": "Turaka",
"tri": "Trió",
"trj": "Toram",
"trk": "Turkic languages",
"trl": "Traveller Scottish",
"trm": "Tregami",
"trn": "Trinitario",
"tro": "Tarao Naga",
"trp": "Kok Borok",
"trq": "San Martín Itunyoso Triqui",
"trr": "Taushiro",
"trs": "Chicahuaxtla Triqui",
"trt": "Tunggare",
"tru": "Turoyo; Surayt",
"trv": "Sediq; Seediq; Taroko",
"trw": "Torwali",
"trx": "Tringgus-Sembaan Bidayuh",
"try": "Turung",
"trz": "Torá",
"ts": "Tsonga",
"tsa": "Tsaangi",
"tsb": "Tsamai",
"tsc": "Tswa",
"tsd": "Tsakonian",
"tse": "Tunisian Sign Language",
"tsg": "Tausug",
"tsh": "Tsuvan",
"tsi": "Tsimshian",
"tsj": "Tshangla",
"tsk": "Tseku",
"tsl": "Ts'ün-Lao",
"tsm": "Turkish Sign Language; Türk İşaret Dili",
"tsp": "Northern Toussian",
"tsq": "Thai Sign Language",
"tsr": "Akei",
"tss": "Taiwan Sign Language",
"tst": "Tondi Songway Kiini",
"tsu": "Tsou",
"tsv": "Tsogo",
"tsw": "Tsishingini",
"tsx": "Mubami",
"tsy": "Tebul Sign Language",
"tsz": "Purepecha",
"tt": "Tatar",
"tta": "Tutelo",
"ttb": "Gaa",
"ttc": "Tektiteko",
"ttd": "Tauade",
"tte": "Bwanabwana",
"ttf": "Tuotomb",
"ttg": "Tutong",
"tth": "Upper Ta'oih",
"tti": "Tobati",
"ttj": "Tooro",
"ttk": "Totoro",
"ttl": "Totela",
"ttm": "Northern Tutchone",
"ttn": "Towei",
"tto": "Lower Ta'oih",
"ttp": "Tombelala",
"ttq": "Tawallammat Tamajaq",
"ttr": "Tera",
"tts": "Northeastern Thai",
"ttt": "Muslim Tat",
"ttu": "Torau",
"ttv": "Titan",
"ttw": "Long Wat",
"tty": "Sikaritai",
"ttz": "Tsum",
"tua": "Wiarumus",
"tub": "Tübatulabal",
"tuc": "Mutu",
"tud": "Tuxá",
"tue": "Tuyuca",
"tuf": "Central Tunebo",
"tug": "Tunia",
"tuh": "Taulil",
"tui": "Tupuri",
"tuj": "Tugutil",
"tul": "Tula",
"tum": "Tumbuka",
"tun": "Tunica",
"tuo": "Tucano",
"tup": "Tupi languages",
"tuq": "Tedaga",
"tus": "Tuscarora",
"tut": "Altaic languages",
"tuu": "Tututni",
"tuv": "Turkana",
"tuw": "Tungus languages",
"tux": "Tuxináwa",
"tuy": "Tugen",
"tuz": "Turka",
"tva": "Vaghua",
"tvd": "Tsuvadi",
"tve": "Te'un",
"tvk": "Southeast Ambrym",
"tvl": "Tuvalu",
"tvm": "Tela-Masbuar",
"tvn": "Tavoyan",
"tvo": "Tidore",
"tvs": "Taveta",
"tvt": "Tutsa Naga",
"tvu": "Tunen",
"tvw": "Sedoa",
"tvx": "Taivoan",
"tvy": "Timor Pidgin",
"tw": "Twi",
"twa": "Twana",
"twb": "Western Tawbuid",
"twc": "Teshenawa",
"twd": "Twents",
"twe": "Tewa (Indonesia)",
"twf": "Northern Tiwa",
"twg": "Tereweng",
"twh": "Tai Dón",
"twl": "Tawara",
"twm": "Tawang Monpa",
"twn": "Twendi",
"two": "Tswapong",
"twp": "Ere",
"twq": "Tasawaq",
"twr": "Southwestern Tarahumara",
"twt": "Turiwára",
"twu": "Termanu",
"tww": "Tuwari",
"twx": "Tewe",
"twy": "Tawoyan",
"txa": "Tombonuo",
"txb": "Tokharian B",
"txc": "Tsetsaut",
"txe": "Totoli",
"txg": "Tangut",
"txh": "Thracian",
"txi": "Ikpeng",
"txj": "Tarjumo",
"txm": "Tomini",
"txn": "West Tarangan",
"txo": "Toto",
"txq": "Tii",
"txr": "Tartessian",
"txs": "Tonsea",
"txt": "Citak",
"txu": "Kayapó",
"txx": "Tatana",
"txy": "Tanosy Malagasy",
"ty": "Tahitian",
"tya": "Tauya",
"tye": "Kyanga",
"tyh": "O'du",
"tyi": "Teke-Tsaayi",
"tyj": "Tai Do; Tai Yo",
"tyl": "Thu Lao",
"tyn": "Kombai",
"typ": "Thaypan",
"tyr": "Tai Daeng",
"tys": "Tày Sa Pa",
"tyt": "Tày Tac",
"tyu": "Kua",
"tyv": "Tuvinian",
"tyx": "Teke-Tyee",
"tyy": "Tiyaa",
"tyz": "Tày",
"tza": "Tanzanian Sign Language",
"tzh": "Tzeltal",
"tzj": "Tz'utujil",
"tzl": "Talossan",
"tzm": "Central Atlas Tamazight",
"tzn": "Tugun",
"tzo": "Tzotzil",
"tzx": "Tabriak",
"uam": "Uamué",
"uan": "Kuan",
"uar": "Tairuma",
"uba": "Ubang",
"ubi": "Ubi",
"ubl": "Buhi'non Bikol",
"ubr": "Ubir",
"ubu": "Umbu-Ungu",
"uby": "Ubykh",
"uda": "Uda",
"ude": "Udihe",
"udg": "Muduga",
"udi": "Udi",
"udj": "Ujir",
"udl": "Wuzlam",
"udm": "Udmurt",
"udu": "Uduk",
"ues": "Kioko",
"ufi": "Ufim",
"ug": "Uighur; Uyghur",
"uga": "Ugaritic",
"ugb": "Kuku-Ugbanh",
"uge": "Ughele",
"ugh": "Kubachi",
"ugn": "Ugandan Sign Language",
"ugo": "Ugong",
"ugy": "Uruguayan Sign Language",
"uha": "Uhami",
"uhn": "Damal",
"uis": "Uisai",
"uiv": "Iyive",
"uji": "Tanjijili",
"uk": "Ukrainian",
"uka": "Kaburi",
"ukg": "Ukuriguma",
"ukh": "Ukhwejo",
"uki": "Kui (India)",
"ukk": "Muak Sa-aak",
"ukl": "Ukrainian Sign Language",
"ukp": "Ukpe-Bayobiri",
"ukq": "Ukwa",
"uks": "Urubú-Kaapor Sign Language; Kaapor Sign Language",
"uku": "Ukue",
"ukv": "Kuku",
"ukw": "Ukwuani-Aboh-Ndoni",
"uky": "Kuuk-Yak",
"ula": "Fungwa",
"ulb": "Ulukwumi",
"ulc": "Ulch",
"ule": "Lule",
"ulf": "Usku; Afra",
"uli": "Ulithian",
"ulk": "Meriam Mir",
"ull": "Ullatan",
"ulm": "Ulumanda'",
"uln": "Unserdeutsch",
"ulu": "Uma' Lung",
"ulw": "Ulwa",
"uma": "Umatilla",
"umb": "Umbundu",
"umc": "Marrucinian",
"umd": "Umbindhamu",
"umg": "Morrobalama; Umbuygamu",
"umi": "Ukit",
"umm": "Umon",
"umn": "Makyan Naga",
"umo": "Umotína",
"ump": "Umpila",
"umr": "Umbugarla",
"ums": "Pendau",
"umu": "Munsee",
"una": "North Watut",
"und": "Undetermined",
"une": "Uneme",
"ung": "Ngarinyin",
"uni": "Uni",
"unk": "Enawené-Nawé",
"unm": "Unami",
"unn": "Kurnai",
"unr": "Mundari",
"unu": "Unubahe",
"unx": "Munda",
"unz": "Unde Kaili",
"uon": "Kulon",
"upi": "Umeda",
"upv": "Uripiv-Wala-Rano-Atchin",
"ur": "Urdu",
"ura": "Urarina",
"urb": "Urubú-Kaapor; Kaapor",
"urc": "Urningangg",
"ure": "Uru",
"urf": "Uradhi",
"urg": "Urigina",
"urh": "Urhobo",
"uri": "Urim",
"urj": "Uralic languages",
"urk": "Urak Lawoi'",
"url": "Urali",
"urm": "Urapmin",
"urn": "Uruangnirin",
"uro": "Ura (Papua New Guinea)",
"urp": "Uru-Pa-In",
"urr": "Lehalurup; Löyöp",
"urt": "Urat",
"uru": "Urumi",
"urv": "Uruava",
"urw": "Sop",
"urx": "Urimo",
"ury": "Orya",
"urz": "Uru-Eu-Wau-Wau",
"usa": "Usarufa",
"ush": "Ushojo",
"usi": "Usui",
"usk": "Usaghade",
"usp": "Uspanteco",
"uss": "us-Saare",
"usu": "Uya",
"uta": "Otank",
"ute": "Ute-Southern Paiute",
"uth": "ut-Hun",
"utp": "Amba (Solomon Islands)",
"utr": "Etulo",
"utu": "Utu",
"uum": "Urum",
"uur": "Ura (Vanuatu)",
"uuu": "U",
"uve": "West Uvean; Fagauvea",
"uvh": "Uri",
"uvl": "Lote",
"uwa": "Kuku-Uwanh",
"uya": "Doko-Uyanga",
"uz": "Uzbek",
"uzn": "Northern Uzbek",
"uzs": "Southern Uzbek",
"vaa": "Vaagri Booli",
"vae": "Vale",
"vaf": "Vafsi",
"vag": "Vagla",
"vah": "Varhadi-Nagpuri",
"vai": "Vai",
"vaj": "Sekele; Northwestern ǃKung; Vasekele",
"val": "Vehes",
"vam": "Vanimo",
"van": "Valman",
"vao": "Vao",
"vap": "Vaiphei",
"var": "Huarijio",
"vas": "Vasavi",
"vau": "Vanuma",
"vav": "Varli",
"vay": "Wayu",
"vbb": "Southeast Babar",
"vbk": "Southwestern Bontok",
"ve": "Venda",
"vec": "Venetian",
"ved": "Veddah",
"vel": "Veluws",
"vem": "Vemgo-Mabas",
"veo": "Ventureño",
"vep": "Veps",
"ver": "Mom Jango",
"vgr": "Vaghri",
"vgt": "Vlaamse Gebarentaal; Flemish Sign Language",
"vi": "Vietnamese",
"vic": "Virgin Islands Creole English",
"vid": "Vidunda",
"vif": "Vili",
"vig": "Viemo",
"vil": "Vilela",
"vin": "Vinza",
"vis": "Vishavan",
"vit": "Viti",
"viv": "Iduna",
"vka": "Kariyarra",
"vkj": "Kujarge",
"vkk": "Kaur",
"vkl": "Kulisusu",
"vkm": "Kamakan",
"vkn": "Koro Nulu",
"vko": "Kodeoha",
"vkp": "Korlai Creole Portuguese",
"vkt": "Tenggarong Kutai Malay",
"vku": "Kurrama",
"vkz": "Koro Zuba",
"vlp": "Valpei",
"vls": "Vlaams",
"vma": "Martuyhunira",
"vmb": "Barbaram",
"vmc": "Juxtlahuaca Mixtec",
"vmd": "Mudu Koraga",
"vme": "East Masela",
"vmf": "Mainfränkisch",
"vmg": "Lungalunga",
"vmh": "Maraghei",
"vmi": "Miwa",
"vmj": "Ixtayutla Mixtec",
"vmk": "Makhuwa-Shirima",
"vml": "Malgana",
"vmm": "Mitlatongo Mixtec",
"vmp": "Soyaltepec Mazatec",
"vmq": "Soyaltepec Mixtec",
"vmr": "Marenje",
"vms": "Moksela",
"vmu": "Muluridyi",
"vmv": "Valley Maidu",
"vmw": "Makhuwa",
"vmx": "Tamazola Mixtec",
"vmy": "Ayautla Mazatec",
"vmz": "Mazatlán Mazatec",
"vnk": "Vano; Lovono",
"vnm": "Vinmavis; Neve'ei",
"vnp": "Vunapu",
"vo": "Volapük",
"vor": "Voro",
"vot": "Votic",
"vra": "Vera'a",
"vro": "Võro",
"vrs": "Varisi",
"vrt": "Burmbar; Banam Bay",
"vsi": "Moldova Sign Language",
"vsl": "Venezuelan Sign Language",
"vsv": "Valencian Sign Language; Llengua de signes valenciana",
"vto": "Vitou",
"vum": "Vumbu",
"vun": "Vunjo",
"vut": "Vute",
"vwa": "Awa (China)",
"wa": "Walloon",
"waa": "Walla Walla",
"wab": "Wab",
"wac": "Wasco-Wishram",
"wad": "Wamesa; Wondama",
"wae": "Walser",
"waf": "Wakoná",
"wag": "Wa'ema",
"wah": "Watubela",
"wai": "Wares",
"waj": "Waffa",
"wak": "Wakashan languages",
"wal": "Wolaytta; Wolaitta",
"wam": "Wampanoag",
"wan": "Wan",
"wao": "Wappo",
"wap": "Wapishana",
"waq": "Wagiman",
"war": "Waray (Philippines)",
"was": "Washo",
"wat": "Kaninuwa",
"wau": "Waurá",
"wav": "Waka",
"waw": "Waiwai",
"wax": "Watam; Marangis",
"way": "Wayana",
"waz": "Wampur",
"wba": "Warao",
"wbb": "Wabo",
"wbe": "Waritai",
"wbf": "Wara",
"wbh": "Wanda",
"wbi": "Vwanji",
"wbj": "Alagwa",
"wbk": "Waigali",
"wbl": "Wakhi",
"wbm": "Wa",
"wbp": "Warlpiri",
"wbq": "Waddar",
"wbr": "Wagdi",
"wbs": "West Bengal Sign Language",
"wbt": "Warnman",
"wbv": "Wajarri",
"wbw": "Woi",
"wca": "Yanomámi",
"wci": "Waci Gbe",
"wdd": "Wandji",
"wdg": "Wadaginam",
"wdj": "Wadjiginy",
"wdk": "Wadikali",
"wdt": "Wendat",
"wdu": "Wadjigu",
"wdy": "Wadjabangayi",
"wea": "Wewaw",
"wec": "Wè Western",
"wed": "Wedau",
"weg": "Wergaia",
"weh": "Weh",
"wei": "Kiunum",
"wem": "Weme Gbe",
"wen": "Sorbian languages",
"weo": "Wemale",
"wep": "Westphalien",
"wer": "Weri",
"wes": "Cameroon Pidgin",
"wet": "Perai",
"weu": "Rawngtu Chin",
"wew": "Wejewa",
"wfg": "Yafi; Zorop",
"wga": "Wagaya",
"wgb": "Wagawaga",
"wgg": "Wangkangurru; Wangganguru",
"wgi": "Wahgi",
"wgo": "Waigeo",
"wgu": "Wirangu",
"wgy": "Warrgamay",
"wha": "Sou Upaa; Manusela",
"whg": "North Wahgi",
"whk": "Wahau Kenyah",
"whu": "Wahau Kayan",
"wib": "Southern Toussian",
"wic": "Wichita",
"wie": "Wik-Epa",
"wif": "Wik-Keyangan",
"wig": "Wik Ngathan",
"wih": "Wik-Me'anha",
"wii": "Minidien",
"wij": "Wik-Iiyanh",
"wik": "Wikalkan",
"wil": "Wilawila",
"wim": "Wik-Mungkan",
"win": "Ho-Chunk",
"wir": "Wiraféd",
"wiu": "Wiru",
"wiv": "Vitu",
"wiy": "Wiyot",
"wja": "Waja",
"wji": "Warji",
"wka": "Kw'adza",
"wkb": "Kumbaran",
"wkd": "Wakde; Mo",
"wkl": "Kalanadi",
"wkr": "Keerray-Woorroong",
"wku": "Kunduvadi",
"wkw": "Wakawaka",
"wky": "Wangkayutyuru",
"wla": "Walio",
"wlc": "Mwali Comorian",
"wle": "Wolane",
"wlg": "Kunbarlang",
"wlh": "Welaun",
"wli": "Waioli",
"wlk": "Wailaki",
"wll": "Wali (Sudan)",
"wlm": "Middle Welsh",
"wlo": "Wolio",
"wlr": "Wailapa",
"wls": "Wallisian",
"wlu": "Wuliwuli",
"wlv": "Wichí Lhamtés Vejoz",
"wlw": "Walak",
"wlx": "Wali (Ghana)",
"wly": "Waling",
"wma": "Mawa (Nigeria)",
"wmb": "Wambaya",
"wmc": "Wamas",
"wmd": "Mamaindé",
"wme": "Wambule",
"wmg": "Western Minyag",
"wmh": "Waima'a",
"wmi": "Wamin",
"wmm": "Maiwa (Indonesia)",
"wmn": "Waamwang",
"wmo": "Wom (Papua New Guinea)",
"wms": "Wambon",
"wmt": "Walmajarri",
"wmw": "Mwani",
"wmx": "Womo",
"wnb": "Wanambre",
"wnc": "Wantoat",
"wnd": "Wandarang",
"wne": "Waneci",
"wng": "Wanggom",
"wni": "Ndzwani Comorian",
"wnk": "Wanukaka",
"wnm": "Wanggamala",
"wnn": "Wunumara",
"wno": "Wano",
"wnp": "Wanap",
"wnu": "Usan",
"wnw": "Wintu",
"wny": "Wanyi; Waanyi",
"wo": "Wolof",
"woa": "Kuwema; Tyaraity",
"wob": "Wè Northern",
"woc": "Wogeo",
"wod": "Wolani",
"woe": "Woleaian",
"wof": "Gambian Wolof",
"wog": "Wogamusin",
"woi": "Kamang",
"wok": "Longto",
"wom": "Wom (Nigeria)",
"won": "Wongo",
"woo": "Manombai",
"wor": "Woria",
"wos": "Hanga Hundi",
"wow": "Wawonii",
"woy": "Weyto",
"wpc": "Maco",
"wrb": "Waluwarra; Warluwara",
"wrg": "Warungu; Gudjal",
"wrh": "Wiradjuri",
"wri": "Wariyangga",
"wrk": "Garrwa",
"wrl": "Warlmanpa",
"wrm": "Warumungu",
"wrn": "Warnang",
"wro": "Worrorra",
"wrp": "Waropen",
"wrr": "Wardaman",
"wrs": "Waris",
"wru": "Waru",
"wrv": "Waruna",
"wrw": "Gugu Warra",
"wrx": "Wae Rana",
"wry": "Merwari",
"wrz": "Waray (Australia)",
"wsa": "Warembori",
"wsg": "Adilabad Gondi",
"wsi": "Wusi",
"wsk": "Waskia",
"wsr": "Owenia",
"wss": "Wasa",
"wsu": "Wasu",
"wsv": "Wotapuri-Katarqalai",
"wtf": "Watiwa",
"wth": "Wathawurrung",
"wti": "Berta",
"wtk": "Watakataui",
"wtm": "Mewati",
"wtw": "Wotu",
"wua": "Wikngenchera",
"wub": "Wunambal",
"wud": "Wudu",
"wuh": "Wutunhua",
"wul": "Silimo",
"wum": "Wumbvu",
"wun": "Bungu",
"wur": "Wurrugu",
"wut": "Wutung",
"wuu": "Wu Chinese",
"wuv": "Wuvulu-Aua",
"wux": "Wulna",
"wuy": "Wauyai",
"wwa": "Waama",
"wwb": "Wakabunga",
"wwo": "Wetamut; Dorig",
"wwr": "Warrwa",
"www": "Wawa",
"wxa": "Waxianghua",
"wxw": "Wardandi",
"wyb": "Wangaaybuwan-Ngiyambaa",
"wyi": "Woiwurrung",
"wym": "Wymysorys",
"wyn": "Wyandot",
"wyr": "Wayoró",
"wyy": "Western Fijian",
"xaa": "Andalusian Arabic",
"xab": "Sambe",
"xac": "Kachari",
"xad": "Adai",
"xae": "Aequian",
"xag": "Aghwan",
"xai": "Kaimbé",
"xaj": "Ararandewára",
"xak": "Máku",
"xal": "Kalmyk; Oirat",
"xam": "ǀXam",
"xan": "Xamtanga",
"xao": "Khao",
"xap": "Apalachee",
"xaq": "Aquitanian",
"xar": "Karami",
"xas": "Kamas",
"xat": "Katawixi",
"xau": "Kauwera",
"xav": "Xavánte",
"xaw": "Kawaiisu",
"xay": "Kayan Mahakam",
"xbb": "Lower Burdekin",
"xbc": "Bactrian",
"xbd": "Bindal",
"xbe": "Bigambal",
"xbg": "Bunganditj",
"xbi": "Kombio",
"xbj": "Birrpayi",
"xbm": "Middle Breton",
"xbn": "Kenaboi",
"xbo": "Bolgarian",
"xbp": "Bibbulman",
"xbr": "Kambera",
"xbw": "Kambiwá",
"xby": "Batjala; Batyala",
"xcb": "Cumbric",
"xcc": "Camunic",
"xce": "Celtiberian",
"xcg": "Cisalpine Gaulish",
"xch": "Chemakum; Chimakum",
"xcl": "Classical Armenian",
"xcm": "Comecrudo",
"xcn": "Cotoname",
"xco": "Chorasmian",
"xcr": "Carian",
"xct": "Classical Tibetan",
"xcu": "Curonian",
"xcv": "Chuvantsy",
"xcw": "Coahuilteco",
"xcy": "Cayuse",
"xda": "Darkinyung",
"xdc": "Dacian",
"xdk": "Dharuk",
"xdm": "Edomite",
"xdo": "Kwandu",
"xdq": "Kaitag",
"xdy": "Malayic Dayak",
"xeb": "Eblan",
"xed": "Hdi",
"xeg": "ǁXegwi",
"xel": "Kelo",
"xem": "Kembayan",
"xep": "Epi-Olmec",
"xer": "Xerénte",
"xes": "Kesawai",
"xet": "Xetá",
"xeu": "Keoru-Ahia",
"xfa": "Faliscan",
"xga": "Galatian",
"xgb": "Gbin",
"xgd": "Gudang",
"xgf": "Gabrielino-Fernandeño",
"xgg": "Goreng",
"xgi": "Garingbal",
"xgl": "Galindan",
"xgm": "Dharumbal; Guwinmal",
"xgn": "Mongolian languages",
"xgr": "Garza",
"xgu": "Unggumi",
"xgw": "Guwa",
"xh": "Xhosa",
"xha": "Harami",
"xhc": "Hunnic",
"xhd": "Hadrami",
"xhe": "Khetrani",
"xhm": "Middle Khmer (1400 to 1850 CE)",
"xhr": "Hernican",
"xht": "Hattic",
"xhu": "Hurrian",
"xhv": "Khua",
"xib": "Iberian",
"xii": "Xiri",
"xil": "Illyrian",
"xin": "Xinca",
"xir": "Xiriâna",
"xis": "Kisan",
"xiv": "Indus Valley Language",
"xiy": "Xipaya",
"xjb": "Minjungbal",
"xjt": "Jaitmatang",
"xka": "Kalkoti",
"xkb": "Northern Nago",
"xkc": "Kho'ini",
"xkd": "Mendalam Kayan",
"xke": "Kereho",
"xkf": "Khengkha",
"xkg": "Kagoro",
"xki": "Kenyan Sign Language",
"xkj": "Kajali",
"xkk": "Kachok; Kaco'",
"xkl": "Mainstream Kenyah",
"xkn": "Kayan River Kayan",
"xko": "Kiorr",
"xkp": "Kabatei",
"xkq": "Koroni",
"xkr": "Xakriabá",
"xks": "Kumbewaha",
"xkt": "Kantosi",
"xku": "Kaamba",
"xkv": "Kgalagadi",
"xkw": "Kembra",
"xkx": "Karore",
"xky": "Uma' Lasan",
"xkz": "Kurtokha",
"xla": "Kamula",
"xlb": "Loup B",
"xlc": "Lycian",
"xld": "Lydian",
"xle": "Lemnian",
"xlg": "Ligurian (Ancient)",
"xli": "Liburnian",
"xln": "Alanic",
"xlo": "Loup A",
"xlp": "Lepontic",
"xls": "Lusitanian",
"xlu": "Cuneiform Luwian",
"xly": "Elymian",
"xma": "Mushungulu",
"xmb": "Mbonga",
"xmc": "Makhuwa-Marrevone",
"xmd": "Mbudum",
"xme": "Median",
"xmf": "Mingrelian",
"xmg": "Mengaka",
"xmh": "Kugu-Muminh",
"xmj": "Majera",
"xmk": "Ancient Macedonian",
"xml": "Malaysian Sign Language",
"xmm": "Manado Malay",
"xmn": "Manichaean Middle Persian",
"xmo": "Morerebi",
"xmp": "Kuku-Mu'inh",
"xmq": "Kuku-Mangk",
"xmr": "Meroitic",
"xms": "Moroccan Sign Language",
"xmt": "Matbat",
"xmu": "Kamu",
"xmv": "Antankarana Malagasy; Tankarana Malagasy",
"xmw": "Tsimihety Malagasy",
"xmx": "Salawati; Maden",
"xmy": "Mayaguduna",
"xmz": "Mori Bawah",
"xna": "Ancient North Arabian",
"xnb": "Kanakanabu",
"xnd": "Na-Dene languages",
"xng": "Middle Mongolian",
"xnh": "Kuanhua",
"xni": "Ngarigu",
"xnj": "Ngoni (Tanzania)",
"xnk": "Nganakarti",
"xnm": "Ngumbarl",
"xnn": "Northern Kankanay",
"xno": "Anglo-Norman",
"xnq": "Ngoni (Mozambique)",
"xnr": "Kangri",
"xns": "Kanashi",
"xnt": "Narragansett",
"xnu": "Nukunul",
"xny": "Nyiyaparli",
"xnz": "Kenzi; Mattoki",
"xoc": "O'chi'chi'",
"xod": "Kokoda",
"xog": "Soga",
"xoi": "Kominimung",
"xok": "Xokleng",
"xom": "Komo (Sudan)",
"xon": "Konkomba",
"xoo": "Xukurú",
"xop": "Kopar",
"xor": "Korubo",
"xow": "Kowaki",
"xpa": "Pirriya",
"xpb": "Northeastern Tasmanian; Pyemmairrener",
"xpc": "Pecheneg",
"xpd": "Oyster Bay Tasmanian",
"xpe": "Liberia Kpelle",
"xpf": "Southeast Tasmanian; Nuenonne",
"xpg": "Phrygian",
"xph": "North Midlands Tasmanian; Tyerrenoterpanner",
"xpi": "Pictish",
"xpj": "Mpalitjanh",
"xpk": "Kulina Pano",
"xpl": "Port Sorell Tasmanian",
"xpm": "Pumpokol",
"xpn": "Kapinawá",
"xpo": "Pochutec",
"xpp": "Puyo-Paekche",
"xpq": "Mohegan-Pequot",
"xpr": "Parthian",
"xps": "Pisidian",
"xpt": "Punthamara",
"xpu": "Punic",
"xpv": "Northern Tasmanian; Tommeginne",
"xpw": "Northwestern Tasmanian; Peerapper",
"xpx": "Southwestern Tasmanian; Toogee",
"xpy": "Puyo",
"xpz": "Bruny Island Tasmanian",
"xqa": "Karakhanid",
"xqt": "Qatabanian",
"xra": "Krahô",
"xrb": "Eastern Karaboro",
"xrd": "Gundungurra",
"xre": "Kreye",
"xrg": "Minang",
"xri": "Krikati-Timbira",
"xrm": "Armazic",
"xrn": "Arin",
"xrr": "Raetic",
"xrt": "Aranama-Tamique",
"xru": "Marriammu",
"xrw": "Karawa",
"xsa": "Sabaean",
"xsb": "Sambal",
"xsc": "Scythian",
"xsd": "Sidetic",
"xse": "Sempan",
"xsh": "Shamang",
"xsi": "Sio",
"xsj": "Subi",
"xsl": "South Slavey",
"xsm": "Kasem",
"xsn": "Sanga (Nigeria)",
"xso": "Solano",
"xsp": "Silopi",
"xsq": "Makhuwa-Saka",
"xsr": "Sherpa",
"xss": "Assan",
"xsu": "Sanumá",
"xsv": "Sudovian",
"xsy": "Saisiyat",
"xta": "Alcozauca Mixtec",
"xtb": "Chazumba Mixtec",
"xtc": "Katcha-Kadugli-Miri",
"xtd": "Diuxi-Tilantongo Mixtec",
"xte": "Ketengban",
"xtg": "Transalpine Gaulish",
"xth": "Yitha Yitha",
"xti": "Sinicahua Mixtec",
"xtj": "San Juan Teita Mixtec",
"xtl": "Tijaltepec Mixtec",
"xtm": "Magdalena Peñasco Mixtec",
"xtn": "Northern Tlaxiaco Mixtec",
"xto": "Tokharian A",
"xtp": "San Miguel Piedras Mixtec",
"xtq": "Tumshuqese",
"xtr": "Early Tripuri",
"xts": "Sindihui Mixtec",
"xtt": "Tacahua Mixtec",
"xtu": "Cuyamecalco Mixtec",
"xtv": "Thawa",
"xtw": "Tawandê",
"xty": "Yoloxochitl Mixtec",
"xua": "Alu Kurumba",
"xub": "Betta Kurumba",
"xud": "Umiida",
"xug": "Kunigami",
"xuj": "Jennu Kurumba",
"xul": "Ngunawal; Nunukul",
"xum": "Umbrian",
"xun": "Unggaranggu",
"xuo": "Kuo",
"xup": "Upper Umpqua",
"xur": "Urartian",
"xut": "Kuthant",
"xuu": "Kxoe; Khwedam",
"xve": "Venetic",
"xvi": "Kamviri",
"xvn": "Vandalic",
"xvo": "Volscian",
"xvs": "Vestinian",
"xwa": "Kwaza",
"xwc": "Woccon",
"xwd": "Wadi Wadi",
"xwe": "Xwela Gbe",
"xwg": "Kwegu",
"xwj": "Wajuk",
"xwk": "Wangkumara",
"xwl": "Western Xwla Gbe",
"xwo": "Written Oirat",
"xwr": "Kwerba Mamberamo",
"xwt": "Wotjobaluk",
"xww": "Wemba Wemba",
"xxb": "Boro (Ghana)",
"xxk": "Ke'o",
"xxm": "Minkin",
"xxr": "Koropó",
"xxt": "Tambora",
"xya": "Yaygir",
"xyb": "Yandjibara",
"xyj": "Mayi-Yapi",
"xyk": "Mayi-Kulan",
"xyl": "Yalakalore",
"xyt": "Mayi-Thakurti",
"xyy": "Yorta Yorta",
"xzh": "Zhang-Zhung",
"xzm": "Zemgalian",
"xzp": "Ancient Zapotec",
"yaa": "Yaminahua",
"yab": "Yuhup",
"yac": "Pass Valley Yali",
"yad": "Yagua",
"yae": "Pumé",
"yaf": "Yaka (Democratic Republic of Congo)",
"yag": "Yámana",
"yah": "Yazgulyam",
"yai": "Yagnobi",
"yaj": "Banda-Yangere",
"yak": "Yakama",
"yal": "Yalunka",
"yam": "Yamba",
"yan": "Mayangna",
"yao": "Yao",
"yap": "Yapese",
"yaq": "Yaqui",
"yar": "Yabarana",
"yas": "Nugunu (Cameroon)",
"yat": "Yambeta",
"yau": "Yuwana",
"yav": "Yangben",
"yaw": "Yawalapití",
"yax": "Yauma",
"yay": "Agwagwune",
"yaz": "Lokaa",
"yba": "Yala",
"ybb": "Yemba",
"ybe": "West Yugur",
"ybh": "Yakha",
"ybi": "Yamphu",
"ybj": "Hasha",
"ybk": "Bokha",
"ybl": "Yukuben",
"ybm": "Yaben",
"ybn": "Yabaâna",
"ybo": "Yabong",
"ybx": "Yawiyo",
"yby": "Yaweyuha",
"ych": "Chesu",
"ycl": "Lolopo",
"ycn": "Yucuna",
"ycp": "Chepya",
"yda": "Yanda",
"ydd": "Eastern Yiddish",
"yde": "Yangum Dey",
"ydg": "Yidgha",
"ydk": "Yoidik",
"yea": "Ravula",
"yec": "Yeniche",
"yee": "Yimas",
"yei": "Yeni",
"yej": "Yevanic",
"yel": "Yela",
"yer": "Tarok",
"yes": "Nyankpa",
"yet": "Yetfa",
"yeu": "Yerukula",
"yev": "Yapunda",
"yey": "Yeyi",
"yga": "Malyangapa",
"ygi": "Yiningayi",
"ygl": "Yangum Gel",
"ygm": "Yagomi",
"ygp": "Gepo",
"ygr": "Yagaria",
"ygs": "Yolŋu Sign Language",
"ygu": "Yugul",
"ygw": "Yagwoia",
"yha": "Baha Buyang",
"yhd": "Judeo-Iraqi Arabic",
"yhl": "Hlepho Phowa",
"yhs": "Yan-nhaŋu Sign Language",
"yi": "Yiddish",
"yia": "Yinggarda",
"yif": "Ache",
"yig": "Wusa Nasu",
"yih": "Western Yiddish",
"yii": "Yidiny",
"yij": "Yindjibarndi",
"yik": "Dongshanba Lalo",
"yil": "Yindjilandji",
"yim": "Yimchungru Naga",
"yin": "Riang Lai; Yinchia",
"yip": "Pholo",
"yiq": "Miqie",
"yir": "North Awyu",
"yis": "Yis",
"yit": "Eastern Lalu",
"yiu": "Awu",
"yiv": "Northern Nisu",
"yix": "Axi Yi",
"yiz": "Azhe",
"yka": "Yakan",
"ykg": "Northern Yukaghir",
"yki": "Yoke",
"ykk": "Yakaikeke",
"ykl": "Khlula",
"ykm": "Kap",
"ykn": "Kua-nsi",
"yko": "Yasa",
"ykr": "Yekora",
"ykt": "Kathu",
"yku": "Kuamasi",
"yky": "Yakoma",
"yla": "Yaul",
"ylb": "Yaleba",
"yle": "Yele",
"ylg": "Yelogu",
"yli": "Angguruk Yali",
"yll": "Yil",
"ylm": "Limi",
"yln": "Langnian Buyang",
"ylo": "Naluo Yi",
"ylr": "Yalarnnga",
"ylu": "Aribwaung",
"yly": "Nyâlayu; Nyelâyu",
"ymb": "Yambes",
"ymc": "Southern Muji",
"ymd": "Muda",
"yme": "Yameo",
"ymg": "Yamongeri",
"ymh": "Mili",
"ymi": "Moji",
"ymk": "Makwe",
"yml": "Iamalele",
"ymm": "Maay",
"ymn": "Yamna; Sunum",
"ymo": "Yangum Mon",
"ymp": "Yamap",
"ymq": "Qila Muji",
"ymr": "Malasar",
"yms": "Mysian",
"ymx": "Northern Muji",
"ymz": "Muzi",
"yna": "Aluo",
"ynd": "Yandruwandha",
"yne": "Lang'e",
"yng": "Yango",
"ynk": "Naukan Yupik",
"ynl": "Yangulam",
"ynn": "Yana",
"yno": "Yong",
"ynq": "Yendang",
"yns": "Yansi",
"ynu": "Yahuna",
"yo": "Yoruba",
"yob": "Yoba",
"yog": "Yogad",
"yoi": "Yonaguni",
"yok": "Yokuts",
"yol": "Yola",
"yom": "Yombe",
"yon": "Yongkom",
"yot": "Yotti",
"yox": "Yoron",
"yoy": "Yoy",
"ypa": "Phala",
"ypb": "Labo Phowa",
"ypg": "Phola",
"yph": "Phupha",
"ypk": "Yupik languages",
"ypm": "Phuma",
"ypn": "Ani Phowa",
"ypo": "Alo Phola",
"ypp": "Phupa",
"ypz": "Phuza",
"yra": "Yerakai",
"yrb": "Yareba",
"yre": "Yaouré",
"yrk": "Nenets",
"yrl": "Nhengatu",
"yrm": "Yirrk-Mel",
"yrn": "Yerong",
"yro": "Yaroamë",
"yrs": "Yarsun",
"yrw": "Yarawata",
"yry": "Yarluyandi",
"ysc": "Yassic",
"ysd": "Samatao",
"ysg": "Sonaga",
"ysl": "Yugoslavian Sign Language",
"ysm": "Myanmar Sign Language",
"ysn": "Sani",
"yso": "Nisi (China)",
"ysp": "Southern Lolopo",
"ysr": "Sirenik Yupik",
"yss": "Yessan-Mayo",
"ysy": "Sanie",
"yta": "Talu",
"ytl": "Tanglang",
"ytp": "Thopho",
"ytw": "Yout Wam",
"yty": "Yatay",
"yua": "Yucateco; Yucatec Maya",
"yub": "Yugambal",
"yuc": "Yuchi",
"yud": "Judeo-Tripolitanian Arabic",
"yue": "Yue Chinese; Cantonese",
"yuf": "Havasupai-Walapai-Yavapai",
"yug": "Yug",
"yui": "Yurutí",
"yuj": "Karkar-Yuri",
"yuk": "Yuki",
"yul": "Yulu",
"yum": "Quechan",
"yun": "Bena (Nigeria)",
"yup": "Yukpa",
"yuq": "Yuqui",
"yur": "Yurok",
"yut": "Yopno",
"yuw": "Yau (Morobe Province)",
"yux": "Southern Yukaghir",
"yuy": "East Yugur",
"yuz": "Yuracare",
"yva": "Yawa",
"yvt": "Yavitero",
"ywa": "Kalou",
"ywg": "Yinhawangka",
"ywl": "Western Lalu",
"ywn": "Yawanawa",
"ywq": "Wuding-Luquan Yi",
"ywr": "Yawuru",
"ywt": "Xishanba Lalo; Central Lalo",
"ywu": "Wumeng Nasu",
"yww": "Yawarawarga",
"yxa": "Mayawali",
"yxg": "Yagara",
"yxl": "Yardliyawarra",
"yxm": "Yinwum",
"yxu": "Yuyu",
"yxy": "Yabula Yabula",
"yyr": "Yir Yoront",
"yyu": "Yau (Sandaun Province)",
"yyz": "Ayizi",
"yzg": "E'ma Buyang",
"yzk": "Zokhuo",
"za": "Zhuang; Chuang",
"zaa": "Sierra de Juárez Zapotec",
"zab": "Western Tlacolula Valley Zapotec; San Juan Guelavía Zapotec",
"zac": "Ocotlán Zapotec",
"zad": "Cajonos Zapotec",
"zae": "Yareni Zapotec",
"zaf": "Ayoquesco Zapotec",
"zag": "Zaghawa",
"zah": "Zangwal",
"zai": "Isthmus Zapotec",
"zaj": "Zaramo",
"zak": "Zanaki",
"zal": "Zauzou",
"zam": "Miahuatlán Zapotec",
"zao": "Ozolotepec Zapotec",
"zap": "Zapotec",
"zaq": "Aloápam Zapotec",
"zar": "Rincón Zapotec",
"zas": "Santo Domingo Albarradas Zapotec",
"zat": "Tabaa Zapotec",
"zau": "Zangskari",
"zav": "Yatzachi Zapotec",
"zaw": "Mitla Zapotec",
"zax": "Xadani Zapotec",
"zay": "Zayse-Zergulla; Zaysete",
"zaz": "Zari",
"zba": "Balaibalan",
"zbc": "Central Berawan",
"zbe": "East Berawan",
"zbl": "Blissymbols; Bliss; Blissymbolics",
"zbt": "Batui",
"zbu": "Bu (Bauchi State)",
"zbw": "West Berawan",
"zca": "Coatecas Altas Zapotec",
"zcd": "Las Delicias Zapotec",
"zch": "Central Hongshuihe Zhuang",
"zdj": "Ngazidja Comorian",
"zea": "Zeeuws",
"zeg": "Zenag",
"zeh": "Eastern Hongshuihe Zhuang",
"zen": "Zenaga",
"zga": "Kinga",
"zgb": "Guibei Zhuang",
"zgh": "Standard Moroccan Tamazight",
"zgm": "Minz Zhuang",
"zgn": "Guibian Zhuang",
"zgr": "Magori",
"zh": "Chinese",
"zhb": "Zhaba",
"zhd": "Dai Zhuang",
"zhi": "Zhire",
"zhn": "Nong Zhuang",
"zhw": "Zhoa",
"zhx": "Chinese (family)",
"zia": "Zia",
"zib": "Zimbabwe Sign Language",
"zik": "Zimakani",
"zil": "Zialo",
"zim": "Mesme",
"zin": "Zinza",
"ziw": "Zigula",
"ziz": "Zizilivakan",
"zka": "Kaimbulawa",
"zkb": "Koibal",
"zkd": "Kadu",
"zkg": "Koguryo",
"zkh": "Khorezmian",
"zkk": "Karankawa",
"zkn": "Kanan",
"zko": "Kott",
"zkp": "São Paulo Kaingáng",
"zkr": "Zakhring",
"zkt": "Kitan",
"zku": "Kaurna",
"zkv": "Krevinian",
"zkz": "Khazar",
"zla": "Zula",
"zle": "East Slavic languages",
"zlj": "Liujiang Zhuang",
"zlm": "Malay (individual language)",
"zln": "Lianshan Zhuang",
"zlq": "Liuqian Zhuang",
"zls": "South Slavic languages",
"zlw": "West Slavic languages",
"zma": "Manda (Australia)",
"zmb": "Zimba",
"zmc": "Margany",
"zmd": "Maridan",
"zme": "Mangerr",
"zmf": "Mfinu",
"zmg": "Marti Ke",
"zmh": "Makolkol",
"zmi": "Negeri Sembilan Malay",
"zmj": "Maridjabin",
"zmk": "Mandandanyi",
"zml": "Matngala",
"zmm": "Marimanindji; Marramaninyshi",
"zmn": "Mbangwe",
"zmo": "Molo",
"zmp": "Mpuono",
"zmq": "Mituku",
"zmr": "Maranunggu",
"zms": "Mbesa",
"zmt": "Maringarr",
"zmu": "Muruwari",
"zmv": "Mbariman-Gudhinma",
"zmw": "Mbo (Democratic Republic of Congo)",
"zmx": "Bomitaba",
"zmy": "Mariyedi",
"zmz": "Mbandja",
"zna": "Zan Gula",
"znd": "Zande languages",
"zne": "Zande (individual language)",
"zng": "Mang",
"znk": "Manangkari",
"zns": "Mangas",
"zoc": "Copainalá Zoque",
"zoh": "Chimalapa Zoque",
"zom": "Zou",
"zoo": "Asunción Mixtepec Zapotec",
"zoq": "Tabasco Zoque",
"zor": "Rayón Zoque",
"zos": "Francisco León Zoque",
"zpa": "Lachiguiri Zapotec",
"zpb": "Yautepec Zapotec",
"zpc": "Choapan Zapotec",
"zpd": "Southeastern Ixtlán Zapotec",
"zpe": "Petapa Zapotec",
"zpf": "San Pedro Quiatoni Zapotec",
"zpg": "Guevea De Humboldt Zapotec",
"zph": "Totomachapan Zapotec",
"zpi": "Santa María Quiegolani Zapotec",
"zpj": "Quiavicuzas Zapotec",
"zpk": "Tlacolulita Zapotec",
"zpl": "Lachixío Zapotec",
"zpm": "Mixtepec Zapotec",
"zpn": "Santa Inés Yatzechi Zapotec",
"zpo": "Amatlán Zapotec",
"zpp": "El Alto Zapotec",
"zpq": "Zoogocho Zapotec",
"zpr": "Santiago Xanica Zapotec",
"zps": "Coatlán Zapotec",
"zpt": "San Vicente Coatlán Zapotec",
"zpu": "Yalálag Zapotec",
"zpv": "Chichicapan Zapotec",
"zpw": "Zaniza Zapotec",
"zpx": "San Baltazar Loxicha Zapotec",
"zpy": "Mazaltepec Zapotec",
"zpz": "Texmelucan Zapotec",
"zqe": "Qiubei Zhuang",
"zra": "Kara (Korea)",
"zrg": "Mirgan",
"zrn": "Zerenkel",
"zro": "Záparo",
"zrp": "Zarphatic",
"zrs": "Mairasi",
"zsa": "Sarasira",
"zsk": "Kaskean",
"zsl": "Zambian Sign Language",
"zsm": "Standard Malay",
"zsr": "Southern Rincon Zapotec",
"zsu": "Sukurum",
"zte": "Elotepec Zapotec",
"ztg": "Xanaguía Zapotec",
"ztl": "Lapaguía-Guivini Zapotec",
"ztm": "San Agustín Mixtepec Zapotec",
"ztn": "Santa Catarina Albarradas Zapotec",
"ztp": "Loxicha Zapotec",
"ztq": "Quioquitani-Quierí Zapotec",
"zts": "Tilquiapan Zapotec",
"ztt": "Tejalapan Zapotec",
"ztu": "Güilá Zapotec",
"ztx": "Zaachila Zapotec",
"zty": "Yatee Zapotec",
"zu": "Zulu",
"zua": "Zeem",
"zuh": "Tokano",
"zum": "Kumzari",
"zun": "Zuni",
"zuy": "Zumaya",
"zwa": "Zay",
"zyb": "Yongbei Zhuang",
"zyg": "Yang Zhuang",
"zyj": "Youjiang Zhuang",
"zyn": "Yongnan Zhuang",
"zyp": "Zyphe Chin",
"zza": "Zaza; Dimili; Dimli (macrolanguage); Kirdki; Kirmanjki (macrolanguage); Zazaki",
"zzj": "Zuojiang Zhuang"
} | datasets/src/datasets/utils/resources/languages.json/0 | {
"file_path": "datasets/src/datasets/utils/resources/languages.json",
"repo_id": "datasets",
"token_count": 111198
} | 81 |
import os
import tarfile
import pyarrow as pa
import pytest
from datasets import Dataset, concatenate_datasets, load_dataset
from datasets.features import Audio, Features, Sequence, Value
from ..utils import (
require_sndfile,
)
@pytest.fixture()
def tar_wav_path(shared_datadir, tmp_path_factory):
audio_path = str(shared_datadir / "test_audio_44100.wav")
path = tmp_path_factory.mktemp("data") / "audio_data.wav.tar"
with tarfile.TarFile(path, "w") as f:
f.add(audio_path, arcname=os.path.basename(audio_path))
return path
@pytest.fixture()
def tar_mp3_path(shared_datadir, tmp_path_factory):
audio_path = str(shared_datadir / "test_audio_44100.mp3")
path = tmp_path_factory.mktemp("data") / "audio_data.mp3.tar"
with tarfile.TarFile(path, "w") as f:
f.add(audio_path, arcname=os.path.basename(audio_path))
return path
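# Helper yielding (member name, file object) pairs from a tar archive; used below to build in-memory {"path", "bytes"} audio examples.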
def iter_archive(archive_path):
with tarfile.open(archive_path) as tar:
for tarinfo in tar:
file_path = tarinfo.name
file_obj = tar.extractfile(tarinfo)
yield file_path, file_obj
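# Default Audio feature: mono, no target sampling rate, stored as an Arrow struct of {bytes, path}.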
def test_audio_instantiation():
audio = Audio()
assert audio.sampling_rate is None
assert audio.mono is True
assert audio.id is None
assert audio.dtype == "dict"
assert audio.pa_type == pa.struct({"bytes": pa.binary(), "path": pa.string()})
assert audio._type == "Audio"
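# Audio columns keep the same Arrow struct type whether used at the top level, inside a struct, or inside a sequence.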
def test_audio_feature_type_to_arrow():
features = Features({"audio": Audio()})
assert features.arrow_schema == pa.schema({"audio": Audio().pa_type})
features = Features({"struct_containing_an_audio": {"audio": Audio()}})
assert features.arrow_schema == pa.schema({"struct_containing_an_audio": pa.struct({"audio": Audio().pa_type})})
features = Features({"sequence_of_audios": Sequence(Audio())})
assert features.arrow_schema == pa.schema({"sequence_of_audios": pa.list_(Audio().pa_type)})
@pytest.mark.parametrize(
"build_example",
[
lambda audio_path: audio_path,
lambda audio_path: open(audio_path, "rb").read(),
lambda audio_path: {"path": audio_path},
lambda audio_path: {"path": audio_path, "bytes": None},
lambda audio_path: {"path": audio_path, "bytes": open(audio_path, "rb").read()},
lambda audio_path: {"path": None, "bytes": open(audio_path, "rb").read()},
lambda audio_path: {"bytes": open(audio_path, "rb").read()},
lambda audio_path: {"array": [0.1, 0.2, 0.3], "sampling_rate": 16_000},
],
)
def test_audio_feature_encode_example(shared_datadir, build_example):
audio_path = str(shared_datadir / "test_audio_44100.wav")
audio = Audio()
encoded_example = audio.encode_example(build_example(audio_path))
assert isinstance(encoded_example, dict)
assert encoded_example.keys() == {"bytes", "path"}
assert encoded_example["bytes"] is not None or encoded_example["path"] is not None
decoded_example = audio.decode_example(encoded_example)
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
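# Headerless PCM input needs an explicit sampling_rate alongside the path, bytes, or array.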
@pytest.mark.parametrize(
"build_example",
[
lambda audio_path: {"path": audio_path, "sampling_rate": 16_000},
lambda audio_path: {"path": audio_path, "bytes": None, "sampling_rate": 16_000},
lambda audio_path: {"path": audio_path, "bytes": open(audio_path, "rb").read(), "sampling_rate": 16_000},
lambda audio_path: {"array": [0.1, 0.2, 0.3], "sampling_rate": 16_000},
],
)
def test_audio_feature_encode_example_pcm(shared_datadir, build_example):
audio_path = str(shared_datadir / "test_audio_16000.pcm")
audio = Audio(sampling_rate=16_000)
encoded_example = audio.encode_example(build_example(audio_path))
assert isinstance(encoded_example, dict)
assert encoded_example.keys() == {"bytes", "path"}
assert encoded_example["bytes"] is not None or encoded_example["path"] is not None
decoded_example = audio.decode_example(encoded_example)
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
@require_sndfile
def test_audio_decode_example(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
audio = Audio()
decoded_example = audio.decode_example(audio.encode_example(audio_path))
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
assert decoded_example["path"] == audio_path
assert decoded_example["array"].shape == (202311,)
assert decoded_example["sampling_rate"] == 44100
with pytest.raises(RuntimeError):
Audio(decode=False).decode_example(audio_path)
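# Resampling to a target sampling_rate changes the decoded array length accordingly (44100 Hz -> 16000 Hz here).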
@require_sndfile
def test_audio_resampling(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
audio = Audio(sampling_rate=16000)
decoded_example = audio.decode_example(audio.encode_example(audio_path))
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
assert decoded_example["path"] == audio_path
assert decoded_example["array"].shape == (73401,)
assert decoded_example["sampling_rate"] == 16000
@require_sndfile
def test_audio_decode_example_mp3(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.mp3")
audio = Audio()
decoded_example = audio.decode_example(audio.encode_example(audio_path))
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
assert decoded_example["path"] == audio_path
assert decoded_example["array"].shape == (110592,)
assert decoded_example["sampling_rate"] == 44100
@require_sndfile
def test_audio_decode_example_opus(shared_datadir):
audio_path = str(shared_datadir / "test_audio_48000.opus")
audio = Audio()
decoded_example = audio.decode_example(audio.encode_example(audio_path))
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
assert decoded_example["path"] == audio_path
assert decoded_example["array"].shape == (48000,)
assert decoded_example["sampling_rate"] == 48000
@pytest.mark.parametrize("sampling_rate", [16_000, 48_000])
def test_audio_decode_example_pcm(shared_datadir, sampling_rate):
audio_path = str(shared_datadir / "test_audio_16000.pcm")
audio_input = {"path": audio_path, "sampling_rate": 16_000}
audio = Audio(sampling_rate=sampling_rate)
decoded_example = audio.decode_example(audio.encode_example(audio_input))
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
assert decoded_example["path"] is None
assert decoded_example["array"].shape == (16208 * sampling_rate // 16_000,)
assert decoded_example["sampling_rate"] == sampling_rate
@require_sndfile
def test_audio_resampling_mp3_different_sampling_rates(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.mp3")
audio_path2 = str(shared_datadir / "test_audio_16000.mp3")
audio = Audio(sampling_rate=48000)
decoded_example = audio.decode_example(audio.encode_example(audio_path))
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
assert decoded_example["path"] == audio_path
assert decoded_example["array"].shape == (120373,)
assert decoded_example["sampling_rate"] == 48000
decoded_example = audio.decode_example(audio.encode_example(audio_path2))
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
assert decoded_example["path"] == audio_path2
assert decoded_example["array"].shape == (122688,)
assert decoded_example["sampling_rate"] == 48000
@require_sndfile
def test_dataset_with_audio_feature(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path]}
features = Features({"audio": Audio()})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_path
assert item["audio"]["array"].shape == (202311,)
assert item["audio"]["sampling_rate"] == 44100
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_path
assert batch["audio"][0]["array"].shape == (202311,)
assert batch["audio"][0]["sampling_rate"] == 44100
column = dset["audio"]
assert len(column) == 1
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_path
assert column[0]["array"].shape == (202311,)
assert column[0]["sampling_rate"] == 44100
@require_sndfile
def test_dataset_with_audio_feature_tar_wav(tar_wav_path):
audio_filename = "test_audio_44100.wav"
data = {"audio": []}
for file_path, file_obj in iter_archive(tar_wav_path):
data["audio"].append({"path": file_path, "bytes": file_obj.read()})
break
features = Features({"audio": Audio()})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_filename
assert item["audio"]["array"].shape == (202311,)
assert item["audio"]["sampling_rate"] == 44100
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_filename
assert batch["audio"][0]["array"].shape == (202311,)
assert batch["audio"][0]["sampling_rate"] == 44100
column = dset["audio"]
assert len(column) == 1
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_filename
assert column[0]["array"].shape == (202311,)
assert column[0]["sampling_rate"] == 44100
@require_sndfile
def test_dataset_with_audio_feature_tar_mp3(tar_mp3_path):
audio_filename = "test_audio_44100.mp3"
data = {"audio": []}
for file_path, file_obj in iter_archive(tar_mp3_path):
data["audio"].append({"path": file_path, "bytes": file_obj.read()})
break
features = Features({"audio": Audio()})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_filename
assert item["audio"]["array"].shape == (110592,)
assert item["audio"]["sampling_rate"] == 44100
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_filename
assert batch["audio"][0]["array"].shape == (110592,)
assert batch["audio"][0]["sampling_rate"] == 44100
column = dset["audio"]
assert len(column) == 1
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_filename
assert column[0]["array"].shape == (110592,)
assert column[0]["sampling_rate"] == 44100
@require_sndfile
def test_dataset_with_audio_feature_with_none():
data = {"audio": [None]}
features = Features({"audio": Audio()})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"] is None
batch = dset[:1]
assert len(batch) == 1
assert batch.keys() == {"audio"}
assert isinstance(batch["audio"], list) and all(item is None for item in batch["audio"])
column = dset["audio"]
assert len(column) == 1
assert isinstance(column, list) and all(item is None for item in column)
    # nested cases: None audio inside a Sequence and inside a struct
data = {"audio": [[None]]}
features = Features({"audio": Sequence(Audio())})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"audio"}
assert all(i is None for i in item["audio"])
data = {"nested": [{"audio": None}]}
features = Features({"nested": {"audio": Audio()}})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"nested"}
assert item["nested"].keys() == {"audio"}
assert item["nested"]["audio"] is None
@require_sndfile
def test_resampling_at_loading_dataset_with_audio_feature(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path]}
features = Features({"audio": Audio(sampling_rate=16000)})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_path
assert item["audio"]["array"].shape == (73401,)
assert item["audio"]["sampling_rate"] == 16000
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_path
assert batch["audio"][0]["array"].shape == (73401,)
assert batch["audio"][0]["sampling_rate"] == 16000
column = dset["audio"]
assert len(column) == 1
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_path
assert column[0]["array"].shape == (73401,)
assert column[0]["sampling_rate"] == 16000
@require_sndfile
def test_resampling_at_loading_dataset_with_audio_feature_mp3(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.mp3")
data = {"audio": [audio_path]}
features = Features({"audio": Audio(sampling_rate=16000)})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_path
assert item["audio"]["array"].shape == (40125,)
assert item["audio"]["sampling_rate"] == 16000
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_path
assert batch["audio"][0]["array"].shape == (40125,)
assert batch["audio"][0]["sampling_rate"] == 16000
column = dset["audio"]
assert len(column) == 1
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_path
assert column[0]["array"].shape == (40125,)
assert column[0]["sampling_rate"] == 16000
@require_sndfile
def test_resampling_after_loading_dataset_with_audio_feature(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path]}
features = Features({"audio": Audio()})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item["audio"]["sampling_rate"] == 44100
dset = dset.cast_column("audio", Audio(sampling_rate=16000))
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_path
assert item["audio"]["array"].shape == (73401,)
assert item["audio"]["sampling_rate"] == 16000
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_path
assert batch["audio"][0]["array"].shape == (73401,)
assert batch["audio"][0]["sampling_rate"] == 16000
column = dset["audio"]
assert len(column) == 1
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_path
assert column[0]["array"].shape == (73401,)
assert column[0]["sampling_rate"] == 16000
@require_sndfile
def test_resampling_after_loading_dataset_with_audio_feature_mp3(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.mp3")
data = {"audio": [audio_path]}
features = Features({"audio": Audio()})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item["audio"]["sampling_rate"] == 44100
dset = dset.cast_column("audio", Audio(sampling_rate=16000))
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_path
assert item["audio"]["array"].shape == (40125,)
assert item["audio"]["sampling_rate"] == 16000
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_path
assert batch["audio"][0]["array"].shape == (40125,)
assert batch["audio"][0]["sampling_rate"] == 16000
column = dset["audio"]
assert len(column) == 1
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_path
assert column[0]["array"].shape == (40125,)
assert column[0]["sampling_rate"] == 16000
@pytest.mark.parametrize(
"build_data",
[
lambda audio_path: {"audio": [audio_path]},
lambda audio_path: {"audio": [open(audio_path, "rb").read()]},
lambda audio_path: {"audio": [{"path": audio_path}]},
lambda audio_path: {"audio": [{"path": audio_path, "bytes": None}]},
lambda audio_path: {"audio": [{"path": audio_path, "bytes": open(audio_path, "rb").read()}]},
lambda audio_path: {"audio": [{"path": None, "bytes": open(audio_path, "rb").read()}]},
lambda audio_path: {"audio": [{"bytes": open(audio_path, "rb").read()}]},
],
)
def test_dataset_cast_to_audio_features(shared_datadir, build_data):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = build_data(audio_path)
dset = Dataset.from_dict(data)
item = dset.cast(Features({"audio": Audio()}))[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
item = dset.cast_column("audio", Audio())[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
def test_dataset_concatenate_audio_features(shared_datadir):
# we use a different data structure between 1 and 2 to make sure they are compatible with each other
audio_path = str(shared_datadir / "test_audio_44100.wav")
data1 = {"audio": [audio_path]}
dset1 = Dataset.from_dict(data1, features=Features({"audio": Audio()}))
data2 = {"audio": [{"bytes": open(audio_path, "rb").read()}]}
dset2 = Dataset.from_dict(data2, features=Features({"audio": Audio()}))
concatenated_dataset = concatenate_datasets([dset1, dset2])
assert len(concatenated_dataset) == len(dset1) + len(dset2)
assert concatenated_dataset[0]["audio"]["array"].shape == dset1[0]["audio"]["array"].shape
assert concatenated_dataset[1]["audio"]["array"].shape == dset2[0]["audio"]["array"].shape
def test_dataset_concatenate_nested_audio_features(shared_datadir):
# we use a different data structure between 1 and 2 to make sure they are compatible with each other
audio_path = str(shared_datadir / "test_audio_44100.wav")
features = Features({"list_of_structs_of_audios": [{"audio": Audio()}]})
data1 = {"list_of_structs_of_audios": [[{"audio": audio_path}]]}
dset1 = Dataset.from_dict(data1, features=features)
data2 = {"list_of_structs_of_audios": [[{"audio": {"bytes": open(audio_path, "rb").read()}}]]}
dset2 = Dataset.from_dict(data2, features=features)
concatenated_dataset = concatenate_datasets([dset1, dset2])
assert len(concatenated_dataset) == len(dset1) + len(dset2)
assert (
concatenated_dataset[0]["list_of_structs_of_audios"][0]["audio"]["array"].shape
== dset1[0]["list_of_structs_of_audios"][0]["audio"]["array"].shape
)
assert (
concatenated_dataset[1]["list_of_structs_of_audios"][0]["audio"]["array"].shape
== dset2[0]["list_of_structs_of_audios"][0]["audio"]["array"].shape
)
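# The next two tests check that map leaves the audio column encoded when the function does not read it and decodes it when it does.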
@require_sndfile
def test_dataset_with_audio_feature_map_is_not_decoded(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path], "text": ["Hello"]}
features = Features({"audio": Audio(), "text": Value("string")})
dset = Dataset.from_dict(data, features=features)
expected_audio = features.encode_batch(data)["audio"][0]
for item in dset.cast_column("audio", Audio(decode=False)):
assert item.keys() == {"audio", "text"}
assert item == {"audio": expected_audio, "text": "Hello"}
def process_text(example):
example["text"] = example["text"] + " World!"
return example
processed_dset = dset.map(process_text)
for item in processed_dset.cast_column("audio", Audio(decode=False)):
assert item.keys() == {"audio", "text"}
assert item == {"audio": expected_audio, "text": "Hello World!"}
@require_sndfile
def test_dataset_with_audio_feature_map_is_decoded(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path], "text": ["Hello"]}
features = Features({"audio": Audio(), "text": Value("string")})
dset = Dataset.from_dict(data, features=features)
def process_audio_sampling_rate_by_example(example):
example["double_sampling_rate"] = 2 * example["audio"]["sampling_rate"]
return example
decoded_dset = dset.map(process_audio_sampling_rate_by_example)
for item in decoded_dset.cast_column("audio", Audio(decode=False)):
assert item.keys() == {"audio", "text", "double_sampling_rate"}
assert item["double_sampling_rate"] == 88200
def process_audio_sampling_rate_by_batch(batch):
double_sampling_rates = []
for audio in batch["audio"]:
double_sampling_rates.append(2 * audio["sampling_rate"])
batch["double_sampling_rate"] = double_sampling_rates
return batch
decoded_dset = dset.map(process_audio_sampling_rate_by_batch, batched=True)
for item in decoded_dset.cast_column("audio", Audio(decode=False)):
assert item.keys() == {"audio", "text", "double_sampling_rate"}
assert item["double_sampling_rate"] == 88200
@require_sndfile
def test_formatted_dataset_with_audio_feature(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path, audio_path]}
features = Features({"audio": Audio()})
dset = Dataset.from_dict(data, features=features)
with dset.formatted_as("numpy"):
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_path
assert item["audio"]["array"].shape == (202311,)
assert item["audio"]["sampling_rate"] == 44100
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_path
assert batch["audio"][0]["array"].shape == (202311,)
assert batch["audio"][0]["sampling_rate"] == 44100
column = dset["audio"]
assert len(column) == 2
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_path
assert column[0]["array"].shape == (202311,)
assert column[0]["sampling_rate"] == 44100
with dset.formatted_as("pandas"):
item = dset[0]
assert item.shape == (1, 1)
assert item.columns == ["audio"]
assert item["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert item["audio"][0]["path"] == audio_path
assert item["audio"][0]["array"].shape == (202311,)
assert item["audio"][0]["sampling_rate"] == 44100
batch = dset[:1]
assert batch.shape == (1, 1)
assert batch.columns == ["audio"]
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_path
assert batch["audio"][0]["array"].shape == (202311,)
assert batch["audio"][0]["sampling_rate"] == 44100
column = dset["audio"]
assert len(column) == 2
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_path
assert column[0]["array"].shape == (202311,)
assert column[0]["sampling_rate"] == 44100
@pytest.fixture
def jsonl_audio_dataset_path(shared_datadir, tmp_path_factory):
import json
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = [{"audio": audio_path, "text": "Hello world!"}]
path = str(tmp_path_factory.mktemp("data") / "audio_dataset.jsonl")
with open(path, "w") as f:
for item in data:
f.write(json.dumps(item) + "\n")
return path
@require_sndfile
@pytest.mark.parametrize("streaming", [False, True])
def test_load_dataset_with_audio_feature(streaming, jsonl_audio_dataset_path, shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data_files = jsonl_audio_dataset_path
features = Features({"audio": Audio(), "text": Value("string")})
dset = load_dataset("json", split="train", data_files=data_files, features=features, streaming=streaming)
item = dset[0] if not streaming else next(iter(dset))
assert item.keys() == {"audio", "text"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_path
assert item["audio"]["array"].shape == (202311,)
assert item["audio"]["sampling_rate"] == 44100
@require_sndfile
@pytest.mark.integration
def test_dataset_with_audio_feature_loaded_from_cache():
# load first time
ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean")
# load from cache
ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")
assert isinstance(ds, Dataset)
def test_dataset_with_audio_feature_undecoded(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path]}
features = Features({"audio": Audio(decode=False)})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"] == {"path": audio_path, "bytes": None}
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0] == {"path": audio_path, "bytes": None}
column = dset["audio"]
assert len(column) == 1
assert column[0] == {"path": audio_path, "bytes": None}
def test_formatted_dataset_with_audio_feature_undecoded(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path]}
features = Features({"audio": Audio(decode=False)})
dset = Dataset.from_dict(data, features=features)
with dset.formatted_as("numpy"):
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"] == {"path": audio_path, "bytes": None}
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0] == {"path": audio_path, "bytes": None}
column = dset["audio"]
assert len(column) == 1
assert column[0] == {"path": audio_path, "bytes": None}
with dset.formatted_as("pandas"):
item = dset[0]
assert item.shape == (1, 1)
assert item.columns == ["audio"]
assert item["audio"][0] == {"path": audio_path, "bytes": None}
batch = dset[:1]
assert batch.shape == (1, 1)
assert batch.columns == ["audio"]
assert batch["audio"][0] == {"path": audio_path, "bytes": None}
column = dset["audio"]
assert len(column) == 1
assert column[0] == {"path": audio_path, "bytes": None}
def test_dataset_with_audio_feature_map_undecoded(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path]}
features = Features({"audio": Audio(decode=False)})
dset = Dataset.from_dict(data, features=features)
def assert_audio_example_undecoded(example):
assert example["audio"] == {"path": audio_path, "bytes": None}
dset.map(assert_audio_example_undecoded)
def assert_audio_batch_undecoded(batch):
for audio in batch["audio"]:
assert audio == {"path": audio_path, "bytes": None}
dset.map(assert_audio_batch_undecoded, batched=True)
def test_audio_embed_storage(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
example = {"bytes": None, "path": audio_path}
storage = pa.array([example], type=pa.struct({"bytes": pa.binary(), "path": pa.string()}))
embedded_storage = Audio().embed_storage(storage)
embedded_example = embedded_storage.to_pylist()[0]
assert embedded_example == {"bytes": open(audio_path, "rb").read(), "path": "test_audio_44100.wav"}
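# Illustrative sketch (not part of the test suite, helper name is made up): as the assertion above
# checks, embed_storage is assumed to read the file at "path", store its raw bytes in the Arrow
# struct and keep only the file name.
def _sketch_embed_audio_bytes(audio_path):
    storage = pa.array(
        [{"bytes": None, "path": audio_path}],
        type=pa.struct({"bytes": pa.binary(), "path": pa.string()}),
    )
    return Audio().embed_storage(storage).to_pylist()[0]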
| datasets/tests/features/test_audio.py/0 | {
"file_path": "datasets/tests/features/test_audio.py",
"repo_id": "datasets",
"token_count": 11528
} | 82 |
import os
import tempfile
from unittest import TestCase
import numpy as np
import pandas as pd
import pytest
from datasets import load_from_disk
from datasets.arrow_dataset import Dataset
from datasets.dataset_dict import DatasetDict, IterableDatasetDict
from datasets.features import ClassLabel, Features, Sequence, Value
from datasets.iterable_dataset import IterableDataset
from datasets.splits import NamedSplit
from .utils import (
assert_arrow_memory_doesnt_increase,
assert_arrow_memory_increases,
require_polars,
require_tf,
require_torch,
)
class DatasetDictTest(TestCase):
def _create_dummy_dataset(self, multiple_columns=False):
if multiple_columns:
data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
dset = Dataset.from_dict(data)
else:
dset = Dataset.from_dict(
{"filename": ["my_name-train" + "_" + f"{x:03d}" for x in np.arange(30).tolist()]}
)
return dset
def _create_dummy_dataset_dict(self, multiple_columns=False) -> DatasetDict:
return DatasetDict(
{
"train": self._create_dummy_dataset(multiple_columns=multiple_columns),
"test": self._create_dummy_dataset(multiple_columns=multiple_columns),
}
)
def _create_dummy_iterable_dataset(self, multiple_columns=False) -> IterableDataset:
def gen():
if multiple_columns:
data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
for v1, v2 in zip(data["col_1"], data["col_2"]):
yield {"col_1": v1, "col_2": v2}
else:
for x in range(30):
yield {"filename": "my_name-train" + "_" + f"{x:03d}"}
return IterableDataset.from_generator(gen)
def _create_dummy_iterable_dataset_dict(self, multiple_columns=False) -> IterableDatasetDict:
return IterableDatasetDict(
{
"train": self._create_dummy_iterable_dataset(multiple_columns=multiple_columns),
"test": self._create_dummy_iterable_dataset(multiple_columns=multiple_columns),
}
)
def test_flatten(self):
dset_split = Dataset.from_dict(
{"a": [{"b": {"c": ["text"]}}] * 10, "foo": [1] * 10},
features=Features({"a": {"b": Sequence({"c": Value("string")})}, "foo": Value("int64")}),
)
dset = DatasetDict({"train": dset_split, "test": dset_split})
dset = dset.flatten()
self.assertDictEqual(dset.column_names, {"train": ["a.b.c", "foo"], "test": ["a.b.c", "foo"]})
self.assertListEqual(sorted(dset["train"].features.keys()), ["a.b.c", "foo"])
self.assertDictEqual(
dset["train"].features, Features({"a.b.c": Sequence(Value("string")), "foo": Value("int64")})
)
del dset
def test_set_format_numpy(self):
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset.set_format(type="numpy", columns=["col_1"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 1)
self.assertIsInstance(dset_split[0]["col_1"], np.int64)
self.assertEqual(dset_split[0]["col_1"].item(), 3)
dset.reset_format()
with dset.formatted_as(type="numpy", columns=["col_1"]):
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 1)
self.assertIsInstance(dset_split[0]["col_1"], np.int64)
self.assertEqual(dset_split[0]["col_1"].item(), 3)
for dset_split in dset.values():
self.assertEqual(dset_split.format["type"], None)
self.assertEqual(dset_split.format["format_kwargs"], {})
self.assertEqual(dset_split.format["columns"], dset_split.column_names)
self.assertEqual(dset_split.format["output_all_columns"], False)
dset.set_format(type="numpy", columns=["col_1"], output_all_columns=True)
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 2)
self.assertIsInstance(dset_split[0]["col_2"], str)
self.assertEqual(dset_split[0]["col_2"], "a")
dset.set_format(type="numpy", columns=["col_1", "col_2"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 2)
self.assertIsInstance(dset_split[0]["col_2"], np.str_)
self.assertEqual(dset_split[0]["col_2"].item(), "a")
del dset
@require_torch
def test_set_format_torch(self):
import torch
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset.set_format(type="torch", columns=["col_1"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 1)
self.assertIsInstance(dset_split[0]["col_1"], torch.Tensor)
self.assertListEqual(list(dset_split[0]["col_1"].shape), [])
self.assertEqual(dset_split[0]["col_1"].item(), 3)
dset.set_format(type="torch", columns=["col_1"], output_all_columns=True)
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 2)
self.assertIsInstance(dset_split[0]["col_2"], str)
self.assertEqual(dset_split[0]["col_2"], "a")
dset.set_format(type="torch")
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 2)
self.assertIsInstance(dset_split[0]["col_1"], torch.Tensor)
self.assertListEqual(list(dset_split[0]["col_1"].shape), [])
self.assertEqual(dset_split[0]["col_1"].item(), 3)
self.assertIsInstance(dset_split[0]["col_2"], str)
self.assertEqual(dset_split[0]["col_2"], "a")
del dset
@require_tf
def test_set_format_tf(self):
import tensorflow as tf
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset.set_format(type="tensorflow", columns=["col_1"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 1)
self.assertIsInstance(dset_split[0]["col_1"], tf.Tensor)
self.assertListEqual(list(dset_split[0]["col_1"].shape), [])
self.assertEqual(dset_split[0]["col_1"].numpy().item(), 3)
dset.set_format(type="tensorflow", columns=["col_1"], output_all_columns=True)
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 2)
self.assertIsInstance(dset_split[0]["col_2"], str)
self.assertEqual(dset_split[0]["col_2"], "a")
dset.set_format(type="tensorflow", columns=["col_1", "col_2"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 2)
self.assertEqual(dset_split[0]["col_2"].numpy().decode("utf-8"), "a")
del dset
def test_set_format_pandas(self):
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset.set_format(type="pandas", columns=["col_1"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0].columns), 1)
self.assertIsInstance(dset_split[0], pd.DataFrame)
self.assertListEqual(list(dset_split[0].shape), [1, 1])
self.assertEqual(dset_split[0]["col_1"].item(), 3)
dset.set_format(type="pandas", columns=["col_1", "col_2"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0].columns), 2)
self.assertEqual(dset_split[0]["col_2"].item(), "a")
del dset
@require_polars
def test_set_format_polars(self):
import polars as pl
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset.set_format(type="polars", columns=["col_1"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0].columns), 1)
self.assertIsInstance(dset_split[0], pl.DataFrame)
self.assertEqual(dset_split[0].shape, (1, 1))
self.assertEqual(dset_split[0]["col_1"].item(), 3)
dset.set_format(type="polars", columns=["col_1", "col_2"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0].columns), 2)
self.assertEqual(dset_split[0]["col_2"].item(), "a")
del dset
def test_set_transform(self):
def transform(batch):
return {k: [str(i).upper() for i in v] for k, v in batch.items()}
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset.set_transform(transform=transform, columns=["col_1"])
for dset_split in dset.values():
self.assertEqual(dset_split.format["type"], "custom")
self.assertEqual(len(dset_split[0].keys()), 1)
self.assertEqual(dset_split[0]["col_1"], "3")
self.assertEqual(dset_split[:2]["col_1"], ["3", "2"])
self.assertEqual(dset_split["col_1"][:2], ["3", "2"])
prev_format = dset[list(dset.keys())[0]].format
for dset_split in dset.values():
dset_split.set_format(**dset_split.format)
self.assertEqual(prev_format, dset_split.format)
dset.set_transform(transform=transform, columns=["col_1", "col_2"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0].keys()), 2)
self.assertEqual(dset_split[0]["col_2"], "A")
del dset
def test_with_format(self):
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset2 = dset.with_format("numpy", columns=["col_1"])
dset.set_format("numpy", columns=["col_1"])
for dset_split, dset_split2 in zip(dset.values(), dset2.values()):
self.assertDictEqual(dset_split.format, dset_split2.format)
del dset, dset2
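    # Illustrative sketch (not a test, name is made up): with_format is assumed to return a new,
    # formatted DatasetDict and leave the original untouched, while set_format mutates in place; the
    # assertion above only checks that both end up with the same format dict.
    def _sketch_with_format_is_non_mutating(self):
        dset = self._create_dummy_dataset_dict(multiple_columns=True)
        formatted = dset.with_format("numpy", columns=["col_1"])
        # The original splits keep the default (python) formatting, i.e. format type None.
        return dset["train"].format["type"], formatted["train"].format["type"]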
def test_with_transform(self):
def transform(batch):
return {k: [str(i).upper() for i in v] for k, v in batch.items()}
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset2 = dset.with_transform(transform, columns=["col_1"])
dset.set_transform(transform, columns=["col_1"])
for dset_split, dset_split2 in zip(dset.values(), dset2.values()):
self.assertDictEqual(dset_split.format, dset_split2.format)
del dset, dset2
def test_cast(self):
dset = self._create_dummy_dataset_dict(multiple_columns=True)
features = dset["train"].features
features["col_1"] = Value("float64")
dset = dset.cast(features)
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 2)
self.assertEqual(dset_split.features["col_1"], Value("float64"))
self.assertIsInstance(dset_split[0]["col_1"], float)
del dset
def test_remove_columns(self):
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset = dset.remove_columns(column_names="col_1")
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 1)
self.assertListEqual(list(dset_split.column_names), ["col_2"])
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset = dset.remove_columns(column_names=["col_1", "col_2"])
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 0)
dset = self._create_dummy_dataset_dict(multiple_columns=True)
for dset_split in dset.values():
dset_split._format_columns = ["col_1", "col_2"]
dset = dset.remove_columns(column_names=["col_1"])
for dset_split in dset.values():
self.assertListEqual(dset_split._format_columns, ["col_2"])
self.assertEqual(dset_split.num_columns, 1)
self.assertListEqual(list(dset_split.column_names), ["col_2"])
del dset
def test_rename_column(self):
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset = dset.rename_column(original_column_name="col_1", new_column_name="new_name")
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 2)
self.assertListEqual(list(dset_split.column_names), ["new_name", "col_2"])
del dset
def test_select_columns(self):
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset = dset.select_columns(column_names=[])
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 0)
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset = dset.select_columns(column_names="col_1")
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 1)
self.assertListEqual(list(dset_split.column_names), ["col_1"])
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset = dset.select_columns(column_names=["col_1", "col_2"])
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 2)
dset = self._create_dummy_dataset_dict(multiple_columns=True)
for dset_split in dset.values():
dset_split._format_columns = ["col_1", "col_2"]
dset = dset.select_columns(column_names=["col_1"])
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 1)
self.assertListEqual(list(dset_split.column_names), ["col_1"])
self.assertListEqual(dset_split._format_columns, ["col_1"])
def test_map(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
mapped_dsets_1: DatasetDict = dsets.map(lambda ex: {"foo": ["bar"] * len(ex["filename"])}, batched=True)
self.assertListEqual(list(dsets.keys()), list(mapped_dsets_1.keys()))
self.assertListEqual(mapped_dsets_1["train"].column_names, ["filename", "foo"])
cache_file_names = {
"train": os.path.join(tmp_dir, "train.arrow"),
"test": os.path.join(tmp_dir, "test.arrow"),
}
mapped_dsets_2: DatasetDict = mapped_dsets_1.map(
lambda ex: {"bar": ["foo"] * len(ex["filename"])}, batched=True, cache_file_names=cache_file_names
)
self.assertListEqual(list(dsets.keys()), list(mapped_dsets_2.keys()))
self.assertListEqual(sorted(mapped_dsets_2["train"].column_names), sorted(["filename", "foo", "bar"]))
del dsets, mapped_dsets_1, mapped_dsets_2
def test_iterable_map(self):
dsets = self._create_dummy_iterable_dataset_dict()
fn_kwargs = {"n": 3}
mapped_dsets: IterableDatasetDict = dsets.map(
lambda x, n: {"foo": [n] * len(x["filename"])},
batched=True,
fn_kwargs=fn_kwargs,
)
mapped_example = next(iter(mapped_dsets["train"]))
self.assertListEqual(sorted(mapped_example.keys()), sorted(["filename", "foo"]))
self.assertLessEqual(mapped_example["foo"], 3)
del dsets, mapped_dsets
def test_filter(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
filtered_dsets_1: DatasetDict = dsets.filter(lambda ex: int(ex["filename"].split("_")[-1]) < 10)
self.assertListEqual(list(dsets.keys()), list(filtered_dsets_1.keys()))
self.assertEqual(len(filtered_dsets_1["train"]), 10)
cache_file_names = {
"train": os.path.join(tmp_dir, "train.arrow"),
"test": os.path.join(tmp_dir, "test.arrow"),
}
filtered_dsets_2: DatasetDict = filtered_dsets_1.filter(
lambda ex: int(ex["filename"].split("_")[-1]) < 5, cache_file_names=cache_file_names
)
self.assertListEqual(list(dsets.keys()), list(filtered_dsets_2.keys()))
self.assertEqual(len(filtered_dsets_2["train"]), 5)
filtered_dsets_3: DatasetDict = dsets.filter(
lambda examples: [int(ex.split("_")[-1]) < 10 for ex in examples["filename"]], batched=True
)
self.assertListEqual(list(dsets.keys()), list(filtered_dsets_3.keys()))
self.assertEqual(len(filtered_dsets_3["train"]), 10)
del dsets, filtered_dsets_1, filtered_dsets_2, filtered_dsets_3
def test_iterable_filter(self):
dsets = self._create_dummy_iterable_dataset_dict()
example = next(iter(dsets["train"]))
fn_kwargs = {"n": 3}
filtered_dsets: IterableDatasetDict = dsets.filter(
lambda ex, n: n < int(ex["filename"].split("_")[-1]), fn_kwargs=fn_kwargs
)
filtered_example = next(iter(filtered_dsets["train"]))
self.assertListEqual(list(example.keys()), list(filtered_example.keys()))
        self.assertEqual(int(filtered_example["filename"].split("_")[-1]), 4)  # only ids strictly greater than n=3 pass the filter, so the first kept id is 4
del dsets, filtered_dsets
def test_sort(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
sorted_dsets_1: DatasetDict = dsets.sort("filename")
self.assertListEqual(list(dsets.keys()), list(sorted_dsets_1.keys()))
self.assertListEqual(
[f.split("_")[-1] for f in sorted_dsets_1["train"]["filename"]],
sorted(f"{x:03d}" for x in range(30)),
)
indices_cache_file_names = {
"train": os.path.join(tmp_dir, "train.arrow"),
"test": os.path.join(tmp_dir, "test.arrow"),
}
sorted_dsets_2: DatasetDict = sorted_dsets_1.sort(
"filename", indices_cache_file_names=indices_cache_file_names, reverse=True
)
self.assertListEqual(list(dsets.keys()), list(sorted_dsets_2.keys()))
self.assertListEqual(
[f.split("_")[-1] for f in sorted_dsets_2["train"]["filename"]],
sorted((f"{x:03d}" for x in range(30)), reverse=True),
)
del dsets, sorted_dsets_1, sorted_dsets_2
def test_shuffle(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
indices_cache_file_names = {
"train": os.path.join(tmp_dir, "train.arrow"),
"test": os.path.join(tmp_dir, "test.arrow"),
}
seeds = {
"train": 1234,
"test": 1234,
}
dsets_shuffled = dsets.shuffle(
seeds=seeds, indices_cache_file_names=indices_cache_file_names, load_from_cache_file=False
)
self.assertListEqual(dsets_shuffled["train"]["filename"], dsets_shuffled["test"]["filename"])
self.assertEqual(len(dsets_shuffled["train"]), 30)
self.assertEqual(dsets_shuffled["train"][0]["filename"], "my_name-train_028")
self.assertEqual(dsets_shuffled["train"][2]["filename"], "my_name-train_010")
self.assertDictEqual(dsets["train"].features, Features({"filename": Value("string")}))
self.assertDictEqual(dsets_shuffled["train"].features, Features({"filename": Value("string")}))
# Reproducibility
indices_cache_file_names_2 = {
"train": os.path.join(tmp_dir, "train_2.arrow"),
"test": os.path.join(tmp_dir, "test_2.arrow"),
}
dsets_shuffled_2 = dsets.shuffle(
seeds=seeds, indices_cache_file_names=indices_cache_file_names_2, load_from_cache_file=False
)
self.assertListEqual(dsets_shuffled["train"]["filename"], dsets_shuffled_2["train"]["filename"])
seeds = {
"train": 1234,
"test": 1,
}
indices_cache_file_names_3 = {
"train": os.path.join(tmp_dir, "train_3.arrow"),
"test": os.path.join(tmp_dir, "test_3.arrow"),
}
dsets_shuffled_3 = dsets.shuffle(
seeds=seeds, indices_cache_file_names=indices_cache_file_names_3, load_from_cache_file=False
)
self.assertNotEqual(dsets_shuffled_3["train"]["filename"], dsets_shuffled_3["test"]["filename"])
# other input types
dsets_shuffled_int = dsets.shuffle(42)
dsets_shuffled_alias = dsets.shuffle(seed=42)
dsets_shuffled_none = dsets.shuffle()
self.assertEqual(len(dsets_shuffled_int["train"]), 30)
self.assertEqual(len(dsets_shuffled_alias["train"]), 30)
self.assertEqual(len(dsets_shuffled_none["train"]), 30)
del dsets, dsets_shuffled, dsets_shuffled_2, dsets_shuffled_3
del dsets_shuffled_int, dsets_shuffled_alias, dsets_shuffled_none
def test_flatten_indices(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
indices_cache_file_names = {
"train": os.path.join(tmp_dir, "train.arrow"),
"test": os.path.join(tmp_dir, "test.arrow"),
}
dsets_shuffled = dsets.shuffle(
seed=42, indices_cache_file_names=indices_cache_file_names, load_from_cache_file=False
)
self.assertIsNotNone(dsets_shuffled["train"]._indices)
self.assertIsNotNone(dsets_shuffled["test"]._indices)
dsets_flat = dsets_shuffled.flatten_indices()
self.assertIsNone(dsets_flat["train"]._indices)
self.assertIsNone(dsets_flat["test"]._indices)
del dsets, dsets_shuffled, dsets_flat
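    # Illustrative sketch (not a test, name is made up): shuffling is assumed to only build an indices
    # mapping on top of the underlying Arrow table, and flatten_indices() to materialize that mapping
    # into a new table, which is why _indices goes back to None in the test above.
    def _sketch_flatten_indices_materializes(self):
        dsets = self._create_dummy_dataset_dict().shuffle(seed=0)
        assert dsets["train"]._indices is not None
        return dsets.flatten_indices()["train"]._indices  # expected to be None once materialized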
def test_check_values_type(self):
dsets = self._create_dummy_dataset_dict()
dsets["bad_split"] = None
self.assertRaises(TypeError, dsets.map, lambda x: x)
self.assertRaises(TypeError, dsets.filter, lambda x: True)
self.assertRaises(TypeError, dsets.shuffle)
self.assertRaises(TypeError, dsets.sort, "filename")
del dsets
def test_serialization(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
dsets.save_to_disk(tmp_dir)
reloaded_dsets = DatasetDict.load_from_disk(tmp_dir)
self.assertListEqual(sorted(reloaded_dsets), ["test", "train"])
self.assertEqual(len(reloaded_dsets["train"]), 30)
self.assertListEqual(reloaded_dsets["train"].column_names, ["filename"])
self.assertEqual(len(reloaded_dsets["test"]), 30)
self.assertListEqual(reloaded_dsets["test"].column_names, ["filename"])
del reloaded_dsets
del dsets["test"]
dsets.save_to_disk(tmp_dir)
reloaded_dsets = DatasetDict.load_from_disk(tmp_dir)
self.assertListEqual(sorted(reloaded_dsets), ["train"])
self.assertEqual(len(reloaded_dsets["train"]), 30)
self.assertListEqual(reloaded_dsets["train"].column_names, ["filename"])
del dsets, reloaded_dsets
dsets = self._create_dummy_dataset_dict()
dsets.save_to_disk(tmp_dir, num_shards={"train": 3, "test": 2})
reloaded_dsets = DatasetDict.load_from_disk(tmp_dir)
self.assertListEqual(sorted(reloaded_dsets), ["test", "train"])
self.assertEqual(len(reloaded_dsets["train"]), 30)
self.assertListEqual(reloaded_dsets["train"].column_names, ["filename"])
self.assertEqual(len(reloaded_dsets["train"].cache_files), 3)
self.assertEqual(len(reloaded_dsets["test"]), 30)
self.assertListEqual(reloaded_dsets["test"].column_names, ["filename"])
self.assertEqual(len(reloaded_dsets["test"].cache_files), 2)
del reloaded_dsets
dsets = self._create_dummy_dataset_dict()
dsets.save_to_disk(tmp_dir, num_proc=2)
reloaded_dsets = DatasetDict.load_from_disk(tmp_dir)
self.assertListEqual(sorted(reloaded_dsets), ["test", "train"])
self.assertEqual(len(reloaded_dsets["train"]), 30)
self.assertListEqual(reloaded_dsets["train"].column_names, ["filename"])
self.assertEqual(len(reloaded_dsets["train"].cache_files), 2)
self.assertEqual(len(reloaded_dsets["test"]), 30)
self.assertListEqual(reloaded_dsets["test"].column_names, ["filename"])
self.assertEqual(len(reloaded_dsets["test"].cache_files), 2)
del reloaded_dsets
def test_load_from_disk(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
dsets.save_to_disk(tmp_dir)
del dsets
dsets = load_from_disk(tmp_dir)
self.assertListEqual(sorted(dsets), ["test", "train"])
self.assertEqual(len(dsets["train"]), 30)
self.assertListEqual(dsets["train"].column_names, ["filename"])
self.assertEqual(len(dsets["test"]), 30)
self.assertListEqual(dsets["test"].column_names, ["filename"])
del dsets
def test_align_labels_with_mapping(self):
train_features = Features(
{
"input_text": Value("string"),
"input_labels": ClassLabel(num_classes=3, names=["entailment", "neutral", "contradiction"]),
}
)
test_features = Features(
{
"input_text": Value("string"),
"input_labels": ClassLabel(num_classes=3, names=["entailment", "contradiction", "neutral"]),
}
)
train_data = {"input_text": ["a", "a", "b", "b", "c", "c"], "input_labels": [0, 0, 1, 1, 2, 2]}
test_data = {"input_text": ["a", "a", "c", "c", "b", "b"], "input_labels": [0, 0, 1, 1, 2, 2]}
label2id = {"CONTRADICTION": 0, "ENTAILMENT": 2, "NEUTRAL": 1}
id2label = {v: k for k, v in label2id.items()}
train_expected_labels = [2, 2, 1, 1, 0, 0]
test_expected_labels = [2, 2, 0, 0, 1, 1]
train_expected_label_names = [id2label[idx] for idx in train_expected_labels]
test_expected_label_names = [id2label[idx] for idx in test_expected_labels]
dsets = DatasetDict(
{
"train": Dataset.from_dict(train_data, features=train_features),
"test": Dataset.from_dict(test_data, features=test_features),
}
)
dsets = dsets.align_labels_with_mapping(label2id, "input_labels")
self.assertListEqual(train_expected_labels, dsets["train"]["input_labels"])
self.assertListEqual(test_expected_labels, dsets["test"]["input_labels"])
train_aligned_label_names = [
dsets["train"].features["input_labels"].int2str(idx) for idx in dsets["train"]["input_labels"]
]
test_aligned_label_names = [
dsets["test"].features["input_labels"].int2str(idx) for idx in dsets["test"]["input_labels"]
]
self.assertListEqual(train_expected_label_names, train_aligned_label_names)
self.assertListEqual(test_expected_label_names, test_aligned_label_names)
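# Illustrative sketch (not a test, name is made up): align_labels_with_mapping re-indexes the
# ClassLabel integers so that they follow the provided label2id mapping; matching is assumed to be
# case-insensitive, since the test above passes upper-case keys against lower-case ClassLabel names.
def _sketch_align_labels_with_mapping():
    features = Features({"label": ClassLabel(names=["entailment", "neutral", "contradiction"])})
    ds = Dataset.from_dict({"label": [0, 1, 2]}, features=features)
    label2id = {"CONTRADICTION": 0, "NEUTRAL": 1, "ENTAILMENT": 2}
    # "entailment" (0) -> 2, "neutral" (1) -> 1, "contradiction" (2) -> 0 under this mapping.
    return ds.align_labels_with_mapping(label2id, "label")["label"]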
def test_dummy_datasetdict_serialize_fs(mockfs):
dataset_dict = DatasetDict(
{
"train": Dataset.from_dict({"a": range(30)}),
"test": Dataset.from_dict({"a": range(10)}),
}
)
dataset_path = "mock://my_dataset"
dataset_dict.save_to_disk(dataset_path, storage_options=mockfs.storage_options)
assert mockfs.isdir(dataset_path)
assert mockfs.glob(dataset_path + "/*")
reloaded = dataset_dict.load_from_disk(dataset_path, storage_options=mockfs.storage_options)
assert list(reloaded) == list(dataset_dict)
for k in dataset_dict:
assert reloaded[k].features == dataset_dict[k].features
assert reloaded[k].to_dict() == dataset_dict[k].to_dict()
def _check_csv_datasetdict(dataset_dict, expected_features, splits=("train",)):
assert isinstance(dataset_dict, DatasetDict)
for split in splits:
dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_csv_keep_in_memory(keep_in_memory, csv_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = DatasetDict.from_csv({"train": csv_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory)
_check_csv_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
],
)
def test_datasetdict_from_csv_features(features, csv_path, tmp_path):
cache_dir = tmp_path / "cache"
    # The CSV file does not preserve the string dtype of col_1: without explicit features it is inferred as "int64" instead of "string"
default_expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = DatasetDict.from_csv({"train": csv_path}, features=features, cache_dir=cache_dir)
_check_csv_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_csv_split(split, csv_path, tmp_path):
if split:
path = {split: csv_path}
else:
split = "train"
path = {"train": csv_path, "test": csv_path}
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
dataset = DatasetDict.from_csv(path, cache_dir=cache_dir)
_check_csv_datasetdict(dataset, expected_features, splits=list(path.keys()))
assert all(dataset[split].split == split for split in path.keys())
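# Illustrative sketch (not a test, name is made up): passing a Features object to DatasetDict.from_csv
# is assumed to override the dtypes that would otherwise be inferred from the CSV file (e.g. forcing
# col_1 back to "string"), which is what the parametrized features tests above exercise.
def _sketch_from_csv_with_explicit_features(csv_path, cache_dir):
    features = Features({"col_1": Value("string"), "col_2": Value("int64"), "col_3": Value("float64")})
    return DatasetDict.from_csv({"train": csv_path}, features=features, cache_dir=cache_dir)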
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
assert isinstance(dataset_dict, DatasetDict)
for split in splits:
dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = DatasetDict.from_json({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory)
_check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
cache_dir = tmp_path / "cache"
default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = DatasetDict.from_json({"train": jsonl_path}, features=features, cache_dir=cache_dir)
_check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
if split:
path = {split: jsonl_path}
else:
split = "train"
path = {"train": jsonl_path, "test": jsonl_path}
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
dataset = DatasetDict.from_json(path, cache_dir=cache_dir)
_check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
assert all(dataset[split].split == split for split in path.keys())
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
assert isinstance(dataset_dict, DatasetDict)
for split in splits:
dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = DatasetDict.from_parquet({"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory)
_check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
],
)
def test_datasetdict_from_parquet_features(features, parquet_path, tmp_path):
cache_dir = tmp_path / "cache"
default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = DatasetDict.from_parquet({"train": parquet_path}, features=features, cache_dir=cache_dir)
_check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_parquet_split(split, parquet_path, tmp_path):
if split:
path = {split: parquet_path}
else:
split = "train"
path = {"train": parquet_path, "test": parquet_path}
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
dataset = DatasetDict.from_parquet(path, cache_dir=cache_dir)
_check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
assert all(dataset[split].split == split for split in path.keys())
def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
assert isinstance(dataset_dict, DatasetDict)
for split in splits:
dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = DatasetDict.from_text({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory)
_check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
cache_dir = tmp_path / "cache"
default_expected_features = {"text": "string"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = DatasetDict.from_text({"train": text_path}, features=features, cache_dir=cache_dir)
_check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
if split:
path = {split: text_path}
else:
split = "train"
path = {"train": text_path, "test": text_path}
cache_dir = tmp_path / "cache"
expected_features = {"text": "string"}
dataset = DatasetDict.from_text(path, cache_dir=cache_dir)
_check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
assert all(dataset[split].split == split for split in path.keys())
| datasets/tests/test_dataset_dict.py/0 | {
"file_path": "datasets/tests/test_dataset_dict.py",
"repo_id": "datasets",
"token_count": 17807
} | 83 |
import pickle
from copy import deepcopy
from itertools import chain, islice
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.compute as pc
import pytest
from datasets import Dataset, load_dataset
from datasets.combine import concatenate_datasets, interleave_datasets
from datasets.features import (
ClassLabel,
Features,
Image,
Value,
)
from datasets.formatting import get_format_type_from_alias
from datasets.info import DatasetInfo
from datasets.iterable_dataset import (
ArrowExamplesIterable,
BufferShuffledExamplesIterable,
CyclingMultiSourcesExamplesIterable,
ExamplesIterable,
FilteredExamplesIterable,
FormattingConfig,
HorizontallyConcatenatedMultiSourcesExamplesIterable,
IterableDataset,
MappedExamplesIterable,
RandomlyCyclingMultiSourcesExamplesIterable,
SelectColumnsIterable,
ShuffledDataSourcesArrowExamplesIterable,
ShuffledDataSourcesExamplesIterable,
ShufflingConfig,
SkipExamplesIterable,
StepExamplesIterable,
TakeExamplesIterable,
TypedExamplesIterable,
VerticallyConcatenatedMultiSourcesExamplesIterable,
_BaseExamplesIterable,
_batch_arrow_tables,
_batch_to_examples,
_convert_to_arrow,
_examples_to_batch,
)
from .utils import (
assert_arrow_memory_doesnt_increase,
is_rng_equal,
require_dill_gt_0_3_2,
require_not_windows,
require_pyspark,
require_tf,
require_torch,
)
DEFAULT_N_EXAMPLES = 20
DEFAULT_BATCH_SIZE = 4
DEFAULT_FILEPATH = "file.txt"
SAMPLE_DATASET_IDENTIFIER = "hf-internal-testing/dataset_with_script" # has dataset script
def generate_examples_fn(**kwargs):
kwargs = kwargs.copy()
n = kwargs.pop("n", DEFAULT_N_EXAMPLES)
filepaths = kwargs.pop("filepaths", None)
for filepath in filepaths or [DEFAULT_FILEPATH]:
if filepaths is not None:
kwargs["filepath"] = filepath
for i in range(n):
yield f"{filepath}_{i}", {"id": i, **kwargs}
def generate_tables_fn(**kwargs):
kwargs = kwargs.copy()
n = kwargs.pop("n", DEFAULT_N_EXAMPLES)
batch_size = kwargs.pop("batch_size", DEFAULT_BATCH_SIZE)
filepaths = kwargs.pop("filepaths", None)
for filepath in filepaths or [DEFAULT_FILEPATH]:
buffer = []
batch_idx = 0
if filepaths is not None:
kwargs["filepath"] = filepath
for i in range(n):
buffer.append({"id": i, **kwargs})
if len(buffer) == batch_size:
yield f"{filepath}_{batch_idx}", pa.Table.from_pylist(buffer)
buffer = []
batch_idx += 1
yield batch_idx, pa.Table.from_pylist(buffer)
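# Illustrative sketch (not a test, name is made up): the two generators above are the building blocks
# of this module. generate_examples_fn yields (key, example-dict) pairs, while generate_tables_fn
# yields (key, pyarrow.Table) pairs of up to batch_size rows, followed by a possibly empty trailing
# table.
def _sketch_generator_outputs():
    first_key, first_example = next(generate_examples_fn(n=2))  # ("file.txt_0", {"id": 0})
    first_table_key, first_table = next(generate_tables_fn(n=2, batch_size=2))
    return first_key, first_example, first_table_key, first_table.num_rows  # first table holds 2 rows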
@pytest.fixture
def dataset():
ex_iterable = ExamplesIterable(generate_examples_fn, {})
return IterableDataset(ex_iterable, info=DatasetInfo(description="dummy"), split="train")
@pytest.fixture
def dataset_with_several_columns():
ex_iterable = ExamplesIterable(
generate_examples_fn,
{"filepath": ["data0.txt", "data1.txt", "data2.txt"], "metadata": {"sources": ["https://foo.bar"]}},
)
return IterableDataset(ex_iterable, info=DatasetInfo(description="dummy"), split="train")
@pytest.fixture
def arrow_file(tmp_path_factory, dataset: IterableDataset):
filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
Dataset.from_generator(dataset.__iter__).map(cache_file_name=filename)
return filename
################################
#
# Utilities tests
#
################################
@pytest.mark.parametrize("batch_size", [1, 2, 3, 9, 10, 11, 20])
@pytest.mark.parametrize("drop_last_batch", [False, True])
def test_convert_to_arrow(batch_size, drop_last_batch):
examples = [{"foo": i} for i in range(10)]
full_table = pa.Table.from_pylist(examples)
num_rows = len(full_table) if not drop_last_batch else len(full_table) // batch_size * batch_size
num_batches = (num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size
subtables = list(
_convert_to_arrow(
list(enumerate(examples)),
batch_size=batch_size,
drop_last_batch=drop_last_batch,
)
)
assert len(subtables) == num_batches
if drop_last_batch:
assert all(len(subtable) == batch_size for _, subtable in subtables)
else:
assert all(len(subtable) == batch_size for _, subtable in subtables[:-1])
assert len(subtables[-1][1]) <= batch_size
if num_rows > 0:
reloaded = pa.concat_tables([subtable for _, subtable in subtables])
assert full_table.slice(0, num_rows).to_pydict() == reloaded.to_pydict()
@pytest.mark.parametrize(
"tables",
[
[pa.table({"foo": range(10)})],
[pa.table({"foo": range(0, 5)}), pa.table({"foo": range(5, 10)})],
[pa.table({"foo": [i]}) for i in range(10)],
],
)
@pytest.mark.parametrize("batch_size", [1, 2, 3, 9, 10, 11, 20])
@pytest.mark.parametrize("drop_last_batch", [False, True])
def test_batch_arrow_tables(tables, batch_size, drop_last_batch):
full_table = pa.concat_tables(tables)
num_rows = len(full_table) if not drop_last_batch else len(full_table) // batch_size * batch_size
num_batches = (num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size
subtables = list(
_batch_arrow_tables(list(enumerate(tables)), batch_size=batch_size, drop_last_batch=drop_last_batch)
)
assert len(subtables) == num_batches
if drop_last_batch:
assert all(len(subtable) == batch_size for _, subtable in subtables)
else:
assert all(len(subtable) == batch_size for _, subtable in subtables[:-1])
assert len(subtables[-1][1]) <= batch_size
if num_rows > 0:
reloaded = pa.concat_tables([subtable for _, subtable in subtables])
assert full_table.slice(0, num_rows).to_pydict() == reloaded.to_pydict()
################################
#
# _BaseExampleIterable tests
#
################################
def test_examples_iterable():
ex_iterable = ExamplesIterable(generate_examples_fn, {})
expected = list(generate_examples_fn())
assert next(iter(ex_iterable)) == expected[0]
assert list(ex_iterable) == expected
assert ex_iterable.iter_arrow is None
def test_examples_iterable_with_kwargs():
ex_iterable = ExamplesIterable(generate_examples_fn, {"filepaths": ["0.txt", "1.txt"], "split": "train"})
expected = list(generate_examples_fn(filepaths=["0.txt", "1.txt"], split="train"))
assert list(ex_iterable) == expected
assert all("split" in ex for _, ex in ex_iterable)
assert sorted({ex["filepath"] for _, ex in ex_iterable}) == ["0.txt", "1.txt"]
def test_examples_iterable_shuffle_data_sources():
ex_iterable = ExamplesIterable(generate_examples_fn, {"filepaths": ["0.txt", "1.txt"]})
ex_iterable = ex_iterable.shuffle_data_sources(np.random.default_rng(40))
expected = list(generate_examples_fn(filepaths=["1.txt", "0.txt"])) # shuffle the filepaths
assert list(ex_iterable) == expected
def test_examples_iterable_shuffle_shards_and_metadata():
def gen(filepaths, all_metadata):
for i, (filepath, metadata) in enumerate(zip(filepaths, all_metadata)):
yield i, {"filepath": filepath, "metadata": metadata}
ex_iterable = ExamplesIterable(
gen,
{
"filepaths": [f"{i}.txt" for i in range(100)],
"all_metadata": [{"id": str(i)} for i in range(100)],
},
)
ex_iterable = ex_iterable.shuffle_data_sources(np.random.default_rng(42))
out = list(ex_iterable)
filepaths_ids = [x["filepath"].split(".")[0] for _, x in out]
metadata_ids = [x["metadata"]["id"] for _, x in out]
assert filepaths_ids == metadata_ids, "entangled lists of shards/metadata should be shuffled the same way"
def test_arrow_examples_iterable():
ex_iterable = ArrowExamplesIterable(generate_tables_fn, {})
expected = sum([pa_table.to_pylist() for _, pa_table in generate_tables_fn()], [])
assert next(iter(ex_iterable))[1] == expected[0]
assert [example for _, example in ex_iterable] == expected
expected = list(generate_tables_fn())
assert list(ex_iterable.iter_arrow()) == expected
def test_arrow_examples_iterable_with_kwargs():
ex_iterable = ArrowExamplesIterable(generate_tables_fn, {"filepaths": ["0.txt", "1.txt"], "split": "train"})
expected = sum(
[pa_table.to_pylist() for _, pa_table in generate_tables_fn(filepaths=["0.txt", "1.txt"], split="train")], []
)
assert [example for _, example in ex_iterable] == expected
assert all("split" in ex for _, ex in ex_iterable)
assert sorted({ex["filepath"] for _, ex in ex_iterable}) == ["0.txt", "1.txt"]
expected = list(generate_tables_fn(filepaths=["0.txt", "1.txt"], split="train"))
assert list(ex_iterable.iter_arrow()) == expected
def test_arrow_examples_iterable_shuffle_data_sources():
ex_iterable = ArrowExamplesIterable(generate_tables_fn, {"filepaths": ["0.txt", "1.txt"]})
ex_iterable = ex_iterable.shuffle_data_sources(np.random.default_rng(40))
expected = sum(
[pa_table.to_pylist() for _, pa_table in generate_tables_fn(filepaths=["1.txt", "0.txt"])], []
) # shuffle the filepaths
assert [example for _, example in ex_iterable] == expected
expected = list(generate_tables_fn(filepaths=["1.txt", "0.txt"]))
assert list(ex_iterable.iter_arrow()) == expected
@pytest.mark.parametrize("seed", [42, 1337, 101010, 123456])
def test_buffer_shuffled_examples_iterable(seed):
n, buffer_size = 100, 30
generator = np.random.default_rng(seed)
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = BufferShuffledExamplesIterable(base_ex_iterable, buffer_size=buffer_size, generator=generator)
rng = deepcopy(generator)
expected_indices_used_for_shuffling = list(
islice(BufferShuffledExamplesIterable._iter_random_indices(rng, buffer_size=buffer_size), n - buffer_size)
)
# indices to pick in the shuffle buffer should all be in the right range
assert all(0 <= index_to_pick < buffer_size for index_to_pick in expected_indices_used_for_shuffling)
# it should be random indices
assert expected_indices_used_for_shuffling != list(range(buffer_size))
# The final order of examples is the result of a shuffle buffer.
all_examples = list(generate_examples_fn(n=n))
# We create a buffer and we pick random examples from it.
buffer, rest = all_examples[:buffer_size], all_examples[buffer_size:]
expected = []
for i, index_to_pick in enumerate(expected_indices_used_for_shuffling):
expected.append(buffer[index_to_pick])
# The picked examples are directly replaced by the next examples from the iterable.
buffer[index_to_pick] = rest.pop(0)
# Once we have reached the end of the iterable, we shuffle the buffer and return the remaining examples.
rng.shuffle(buffer)
expected += buffer
assert next(iter(ex_iterable)) == expected[0]
assert list(ex_iterable) == expected
assert sorted(ex_iterable) == sorted(all_examples)
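# Illustrative sketch (not a test, name is made up): a minimal shuffle buffer mirroring the logic the
# test above reconstructs. A fixed-size buffer is filled first; afterwards a random slot is emitted and
# immediately refilled from the stream, and the leftover buffer is flushed in shuffled order at the end.
def _sketch_shuffle_buffer(items, buffer_size, rng):  # rng is assumed to be a numpy Generator
    buffer = []
    for item in items:
        if len(buffer) < buffer_size:
            buffer.append(item)
        else:
            index_to_pick = rng.integers(0, buffer_size)
            yield buffer[index_to_pick]
            buffer[index_to_pick] = item
    rng.shuffle(buffer)
    yield from buffer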
def test_cycling_multi_sources_examples_iterable():
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"text": "foo"})
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"text": "bar"})
ex_iterable = CyclingMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2])
expected = list(chain(*zip(generate_examples_fn(text="foo"), generate_examples_fn(text="bar"))))
    # The cycling stops as soon as one iterable runs out of examples (here ex_iterable1), so the last sample from ex_iterable2 is unnecessary
expected = expected[:-1]
assert next(iter(ex_iterable)) == expected[0]
assert list(ex_iterable) == expected
assert all((x["id"], x["text"]) == (i // 2, "bar" if i % 2 else "foo") for i, (_, x) in enumerate(ex_iterable))
@pytest.mark.parametrize("probabilities", [None, (0.5, 0.5), (0.9, 0.1)])
def test_randomly_cycling_multi_sources_examples_iterable(probabilities):
seed = 42
generator = np.random.default_rng(seed)
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"text": "foo"})
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"text": "bar"})
ex_iterable = RandomlyCyclingMultiSourcesExamplesIterable(
[ex_iterable1, ex_iterable2], generator=generator, probabilities=probabilities
)
    # The source to draw from is picked at random for each example. Iteration stops as soon as one of the iterators is exhausted.
rng = deepcopy(generator)
iterators = (generate_examples_fn(text="foo"), generate_examples_fn(text="bar"))
indices_iterator = RandomlyCyclingMultiSourcesExamplesIterable._iter_random_indices(
rng, len(iterators), p=probabilities
)
expected = []
lengths = [len(list(ex_iterable1)), len(list(ex_iterable2))]
for i in indices_iterator:
if lengths[0] == 0 or lengths[1] == 0:
break
for key, example in iterators[i]:
expected.append((key, example))
lengths[i] -= 1
break
else:
break
assert next(iter(ex_iterable)) == expected[0]
assert list(ex_iterable) == expected
@pytest.mark.parametrize(
"n, func, batched, batch_size",
[
(3, lambda x: {"id+1": x["id"] + 1}, False, None), # just add 1 to the id
(3, lambda x: {"id+1": [x["id"][0] + 1]}, True, 1), # same with bs=1
(5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, 10), # same with bs=10
(25, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, 10), # same with bs=10
(5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, None), # same with bs=None
(5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, -1), # same with bs<=0
(3, lambda x: {k: v * 2 for k, v in x.items()}, True, 1), # make a duplicate of each example
],
)
def test_mapped_examples_iterable(n, func, batched, batch_size):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = MappedExamplesIterable(base_ex_iterable, func, batched=batched, batch_size=batch_size)
all_examples = [x for _, x in generate_examples_fn(n=n)]
if batched is False:
expected = [{**x, **func(x)} for x in all_examples]
else:
# For batched map we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function
all_transformed_examples = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = _examples_to_batch(examples)
transformed_batch = func(batch)
all_transformed_examples.extend(_batch_to_examples(transformed_batch))
expected = _examples_to_batch(all_examples)
expected.update(_examples_to_batch(all_transformed_examples))
expected = list(_batch_to_examples(expected))
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
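# Illustrative sketch (not a test, name is made up): _examples_to_batch turns a list of example dicts
# into a single dict of columns and _batch_to_examples is its inverse; the batched-map reference
# computations in the tests above and below rely on this round trip.
def _sketch_batch_round_trip():
    examples = [{"id": 0}, {"id": 1}]
    batch = _examples_to_batch(examples)  # {"id": [0, 1]}
    return list(_batch_to_examples(batch))  # back to [{"id": 0}, {"id": 1}]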
@pytest.mark.parametrize(
"n, func, batched, batch_size",
[
(3, lambda x: {"id+1": x["id"] + 1}, False, None), # just add 1 to the id
(3, lambda x: {"id+1": [x["id"][0] + 1]}, True, 1), # same with bs=1
(5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, 10), # same with bs=10
(25, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, 10), # same with bs=10
(5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, None), # same with bs=None
(5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, -1), # same with bs<=0
(3, lambda x: {k: v * 2 for k, v in x.items()}, True, 1), # make a duplicate of each example
],
)
def test_mapped_examples_iterable_drop_last_batch(n, func, batched, batch_size):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = MappedExamplesIterable(
base_ex_iterable, func, batched=batched, batch_size=batch_size, drop_last_batch=True
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
is_empty = False
if batched is False:
# `drop_last_batch` has no effect here
expected = [{**x, **func(x)} for x in all_examples]
else:
# For batched map we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function
all_transformed_examples = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
if len(examples) < batch_size: # ignore last batch
break
batch = _examples_to_batch(examples)
transformed_batch = func(batch)
all_transformed_examples.extend(_batch_to_examples(transformed_batch))
all_examples = all_examples if n % batch_size == 0 else all_examples[: n // batch_size * batch_size]
if all_examples:
expected = _examples_to_batch(all_examples)
expected.update(_examples_to_batch(all_transformed_examples))
expected = list(_batch_to_examples(expected))
else:
is_empty = True
if not is_empty:
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
else:
with pytest.raises(StopIteration):
next(iter(ex_iterable))
@pytest.mark.parametrize(
"n, func, batched, batch_size",
[
(3, lambda x, index: {"id+idx": x["id"] + index}, False, None), # add the index to the id
(
25,
lambda x, indices: {"id+idx": [i + j for i, j in zip(x["id"], indices)]},
True,
10,
), # add the index to the id
(5, lambda x, indices: {"id+idx": [i + j for i, j in zip(x["id"], indices)]}, True, None), # same with bs=None
(5, lambda x, indices: {"id+idx": [i + j for i, j in zip(x["id"], indices)]}, True, -1), # same with bs<=0
],
)
def test_mapped_examples_iterable_with_indices(n, func, batched, batch_size):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = MappedExamplesIterable(
base_ex_iterable, func, batched=batched, batch_size=batch_size, with_indices=True
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
if batched is False:
expected = [{**x, **func(x, idx)} for idx, x in enumerate(all_examples)]
else:
# For batched map we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function
all_transformed_examples = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = _examples_to_batch(examples)
indices = list(range(batch_offset, batch_offset + len(examples)))
transformed_batch = func(batch, indices)
all_transformed_examples.extend(_batch_to_examples(transformed_batch))
expected = _examples_to_batch(all_examples)
expected.update(_examples_to_batch(all_transformed_examples))
expected = list(_batch_to_examples(expected))
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
@pytest.mark.parametrize(
"n, func, batched, batch_size, remove_columns",
[
(3, lambda x: {"id+1": x["id"] + 1}, False, None, ["extra_column"]), # just add 1 to the id
(25, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, 10, ["extra_column"]), # same with bs=10
(
50,
lambda x: {"foo": ["bar"] * np.random.default_rng(x["id"][0]).integers(0, 10)},
True,
8,
["extra_column", "id"],
        ),  # produce a variable number of output rows per batch, independent of the input batch length
(5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, None, ["extra_column"]), # same with bs=None
(5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, -1, ["extra_column"]), # same with bs<=0
],
)
def test_mapped_examples_iterable_remove_columns(n, func, batched, batch_size, remove_columns):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n, "extra_column": "foo"})
ex_iterable = MappedExamplesIterable(
base_ex_iterable, func, batched=batched, batch_size=batch_size, remove_columns=remove_columns
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
columns_to_remove = remove_columns if isinstance(remove_columns, list) else [remove_columns]
if batched is False:
expected = [{**{k: v for k, v in x.items() if k not in columns_to_remove}, **func(x)} for x in all_examples]
else:
# For batched map we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function
all_transformed_examples = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = _examples_to_batch(examples)
transformed_batch = func(batch)
all_transformed_examples.extend(_batch_to_examples(transformed_batch))
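        # remove_columns only drops the listed input columns; columns produced by the function are kept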
expected = {k: v for k, v in _examples_to_batch(all_examples).items() if k not in columns_to_remove}
expected.update(_examples_to_batch(all_transformed_examples))
expected = list(_batch_to_examples(expected))
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
@pytest.mark.parametrize(
"n, func, batched, batch_size, fn_kwargs",
[
(3, lambda x, y=0: {"id+y": x["id"] + y}, False, None, None),
(3, lambda x, y=0: {"id+y": x["id"] + y}, False, None, {"y": 3}),
(25, lambda x, y=0: {"id+y": [i + y for i in x["id"]]}, True, 10, {"y": 3}),
(5, lambda x, y=0: {"id+y": [i + y for i in x["id"]]}, True, None, {"y": 3}), # same with bs=None
(5, lambda x, y=0: {"id+y": [i + y for i in x["id"]]}, True, -1, {"y": 3}), # same with bs<=0
],
)
def test_mapped_examples_iterable_fn_kwargs(n, func, batched, batch_size, fn_kwargs):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = MappedExamplesIterable(
base_ex_iterable, func, batched=batched, batch_size=batch_size, fn_kwargs=fn_kwargs
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
if fn_kwargs is None:
fn_kwargs = {}
if batched is False:
expected = [{**x, **func(x, **fn_kwargs)} for x in all_examples]
else:
# For batched map we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function
all_transformed_examples = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = _examples_to_batch(examples)
transformed_batch = func(batch, **fn_kwargs)
all_transformed_examples.extend(_batch_to_examples(transformed_batch))
expected = _examples_to_batch(all_examples)
expected.update(_examples_to_batch(all_transformed_examples))
expected = list(_batch_to_examples(expected))
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
@pytest.mark.parametrize(
"n, func, batched, batch_size, input_columns",
[
(3, lambda id_: {"id+1": id_ + 1}, False, None, ["id"]), # just add 1 to the id
(25, lambda ids_: {"id+1": [i + 1 for i in ids_]}, True, 10, ["id"]), # same with bs=10
(5, lambda ids_: {"id+1": [i + 1 for i in ids_]}, True, None, ["id"]), # same with bs=None
(5, lambda ids_: {"id+1": [i + 1 for i in ids_]}, True, -1, ["id"]), # same with bs<=0
],
)
def test_mapped_examples_iterable_input_columns(n, func, batched, batch_size, input_columns):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = MappedExamplesIterable(
base_ex_iterable, func, batched=batched, batch_size=batch_size, input_columns=input_columns
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
columns_to_input = input_columns if isinstance(input_columns, list) else [input_columns]
if batched is False:
expected = [{**x, **func(*[x[col] for col in columns_to_input])} for x in all_examples]
else:
# For batched map we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function
all_transformed_examples = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = _examples_to_batch(examples)
transformed_batch = func(*[batch[col] for col in columns_to_input])
all_transformed_examples.extend(_batch_to_examples(transformed_batch))
expected = _examples_to_batch(all_examples)
expected.update(_examples_to_batch(all_transformed_examples))
expected = list(_batch_to_examples(expected))
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
@pytest.mark.parametrize(
"n, func, batched, batch_size",
[
(3, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), False, None), # just add 1 to the id
(3, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 1), # same with bs=1
(5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 10), # same with bs=10
(25, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 10), # same with bs=10
(5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, None), # same with bs=None
(5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, -1), # same with bs<=0
(3, lambda t: pa.concat_tables([t] * 2), True, 1), # make a duplicate of each example
],
)
def test_mapped_examples_iterable_arrow_format(n, func, batched, batch_size):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = MappedExamplesIterable(
base_ex_iterable,
func,
batched=batched,
batch_size=batch_size,
formatting=FormattingConfig(format_type="arrow"),
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
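    # with arrow formatting the function receives a pa.Table and its output replaces the batch
    # entirely, i.e. there is no merging with the input columns as in the python-formatted tests above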
if batched is False:
expected = [func(pa.Table.from_pylist([x])).to_pylist()[0] for x in all_examples]
else:
expected = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = pa.Table.from_pylist(examples)
expected.extend(func(batch).to_pylist())
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
@pytest.mark.parametrize(
"n, func, batched, batch_size",
[
(3, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), False, None), # just add 1 to the id
(3, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 1), # same with bs=1
(5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 10), # same with bs=10
(25, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 10), # same with bs=10
(5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, None), # same with bs=None
(5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, -1), # same with bs<=0
(3, lambda t: pa.concat_tables([t] * 2), True, 1), # make a duplicate of each example
],
)
def test_mapped_examples_iterable_drop_last_batch_and_arrow_format(n, func, batched, batch_size):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = MappedExamplesIterable(
base_ex_iterable,
func,
batched=batched,
batch_size=batch_size,
drop_last_batch=True,
formatting=FormattingConfig(format_type="arrow"),
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
is_empty = False
if batched is False:
# `drop_last_batch` has no effect here
expected = [func(pa.Table.from_pylist([x])).to_pylist()[0] for x in all_examples]
else:
all_transformed_examples = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
if len(examples) < batch_size: # ignore last batch
break
batch = pa.Table.from_pylist(examples)
out = func(batch)
all_transformed_examples.extend(
out.to_pylist()
) # we don't merge with input since they're arrow tables and not dictionaries
all_examples = all_examples if n % batch_size == 0 else all_examples[: n // batch_size * batch_size]
if all_examples:
expected = all_transformed_examples
else:
is_empty = True
if not is_empty:
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
else:
with pytest.raises(StopIteration):
next(iter(ex_iterable))
@pytest.mark.parametrize(
"n, func, batched, batch_size",
[
(
3,
lambda t, index: t.append_column("id+idx", pc.add(t["id"], index)),
False,
None,
), # add the index to the id
(
25,
lambda t, indices: t.append_column("id+idx", pc.add(t["id"], indices)),
True,
10,
), # add the index to the id
(5, lambda t, indices: t.append_column("id+idx", pc.add(t["id"], indices)), True, None), # same with bs=None
(5, lambda t, indices: t.append_column("id+idx", pc.add(t["id"], indices)), True, -1), # same with bs<=0
],
)
def test_mapped_examples_iterable_with_indices_and_arrow_format(n, func, batched, batch_size):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = MappedExamplesIterable(
base_ex_iterable,
func,
batched=batched,
batch_size=batch_size,
with_indices=True,
formatting=FormattingConfig(format_type="arrow"),
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
if batched is False:
expected = [func(pa.Table.from_pylist([x]), i).to_pylist()[0] for i, x in enumerate(all_examples)]
else:
expected = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = pa.Table.from_pylist(examples)
expected.extend(func(batch, list(range(batch_offset, batch_offset + len(batch)))).to_pylist())
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
@pytest.mark.parametrize(
"n, func, batched, batch_size, remove_columns",
[
(
3,
lambda t: t.append_column("id+1", pc.add(t["id"], 1)),
False,
None,
["extra_column"],
), # just add 1 to the id
(25, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 10, ["extra_column"]), # same with bs=10
(
50,
lambda t: pa.table({"foo": ["bar"] * np.random.default_rng(t["id"][0].as_py()).integers(0, 10)}),
True,
8,
["extra_column", "id"],
        ),  # replace the batch with a random number of rows
(5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, None, ["extra_column"]), # same with bs=None
(5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, -1, ["extra_column"]), # same with bs<=0
],
)
def test_mapped_examples_iterable_remove_columns_arrow_format(n, func, batched, batch_size, remove_columns):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n, "extra_column": "foo"})
ex_iterable = MappedExamplesIterable(
base_ex_iterable,
func,
batched=batched,
batch_size=batch_size,
remove_columns=remove_columns,
formatting=FormattingConfig(format_type="arrow"),
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
columns_to_remove = remove_columns if isinstance(remove_columns, list) else [remove_columns]
if batched is False:
expected = [
{**{k: v for k, v in func(pa.Table.from_pylist([x])).to_pylist()[0].items() if k not in columns_to_remove}}
for x in all_examples
]
else:
expected = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = pa.Table.from_pylist(examples)
expected.extend(
[{k: v for k, v in x.items() if k not in columns_to_remove} for x in func(batch).to_pylist()]
)
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
@pytest.mark.parametrize(
"n, func, batched, batch_size, fn_kwargs",
[
        (3, lambda t, y=0: t.append_column("id+y", pc.add(t["id"], y)), False, None, None),
        (3, lambda t, y=0: t.append_column("id+y", pc.add(t["id"], y)), False, None, {"y": 3}),
        (25, lambda t, y=0: t.append_column("id+y", pc.add(t["id"], y)), True, 10, {"y": 3}),
        (5, lambda t, y=0: t.append_column("id+y", pc.add(t["id"], y)), True, None, {"y": 3}),  # same with bs=None
        (5, lambda t, y=0: t.append_column("id+y", pc.add(t["id"], y)), True, -1, {"y": 3}),  # same with bs<=0
],
)
def test_mapped_examples_iterable_fn_kwargs_and_arrow_format(n, func, batched, batch_size, fn_kwargs):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = MappedExamplesIterable(
base_ex_iterable,
func,
batched=batched,
batch_size=batch_size,
fn_kwargs=fn_kwargs,
formatting=FormattingConfig(format_type="arrow"),
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
if fn_kwargs is None:
fn_kwargs = {}
if batched is False:
expected = [func(pa.Table.from_pylist([x]), **fn_kwargs).to_pylist()[0] for x in all_examples]
else:
expected = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = pa.Table.from_pylist(examples)
expected.extend(func(batch, **fn_kwargs).to_pylist())
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
@pytest.mark.parametrize(
"n, func, batched, batch_size, input_columns",
[
(3, lambda id_: pa.table({"id+1": pc.add(id_, 1)}), False, None, ["id"]), # just add 1 to the id
(25, lambda ids_: pa.table({"id+1": pc.add(ids_, 1)}), True, 10, ["id"]), # same with bs=10
(5, lambda ids_: pa.table({"id+1": pc.add(ids_, 1)}), True, None, ["id"]), # same with bs=None
(5, lambda ids_: pa.table({"id+1": pc.add(ids_, 1)}), True, -1, ["id"]), # same with bs<=0
],
)
def test_mapped_examples_iterable_input_columns_and_arrow_format(n, func, batched, batch_size, input_columns):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = MappedExamplesIterable(
base_ex_iterable,
func,
batched=batched,
batch_size=batch_size,
input_columns=input_columns,
formatting=FormattingConfig(format_type="arrow"),
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
columns_to_input = input_columns if isinstance(input_columns, list) else [input_columns]
if batched is False:
expected = [
func(*[pa.Table.from_pylist([x])[col] for col in columns_to_input]).to_pylist()[0] for x in all_examples
]
else:
expected = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = pa.Table.from_pylist(examples)
expected.extend(func(*[batch[col] for col in columns_to_input]).to_pylist())
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
@pytest.mark.parametrize(
"n, func, batched, batch_size",
[
(3, lambda x: x["id"] % 2 == 0, False, None), # keep even number
(3, lambda x: [x["id"][0] % 2 == 0], True, 1), # same with bs=1
(25, lambda x: [i % 2 == 0 for i in x["id"]], True, 10), # same with bs=10
(5, lambda x: [i % 2 == 0 for i in x["id"]], True, None), # same with bs=None
(5, lambda x: [i % 2 == 0 for i in x["id"]], True, -1), # same with bs<=0
(3, lambda x: False, False, None), # return 0 examples
(3, lambda x: [False] * len(x["id"]), True, 10), # same with bs=10
],
)
def test_filtered_examples_iterable(n, func, batched, batch_size):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = FilteredExamplesIterable(base_ex_iterable, func, batched=batched, batch_size=batch_size)
all_examples = [x for _, x in generate_examples_fn(n=n)]
if batched is False:
expected = [x for x in all_examples if func(x)]
else:
# For batched filter we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function
expected = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = _examples_to_batch(examples)
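            # a batched predicate returns one boolean per example; keep only the masked-in examples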
mask = func(batch)
expected.extend([x for x, to_keep in zip(examples, mask) if to_keep])
if expected:
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
@pytest.mark.parametrize(
"n, func, batched, batch_size",
[
(3, lambda x, index: index % 2 == 0, False, None), # keep even number
(25, lambda x, indices: [idx % 2 == 0 for idx in indices], True, 10), # same with bs=10
(5, lambda x, indices: [idx % 2 == 0 for idx in indices], True, None), # same with bs=None
(5, lambda x, indices: [idx % 2 == 0 for idx in indices], True, -1), # same with bs<=0
],
)
def test_filtered_examples_iterable_with_indices(n, func, batched, batch_size):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = FilteredExamplesIterable(
base_ex_iterable, func, batched=batched, batch_size=batch_size, with_indices=True
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
if batched is False:
expected = [x for idx, x in enumerate(all_examples) if func(x, idx)]
else:
# For batched filter we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function
expected = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = _examples_to_batch(examples)
indices = list(range(batch_offset, batch_offset + len(examples)))
mask = func(batch, indices)
expected.extend([x for x, to_keep in zip(examples, mask) if to_keep])
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
@pytest.mark.parametrize(
"n, func, batched, batch_size, input_columns",
[
(3, lambda id_: id_ % 2 == 0, False, None, ["id"]), # keep even number
(25, lambda ids_: [i % 2 == 0 for i in ids_], True, 10, ["id"]), # same with bs=10
(3, lambda ids_: [i % 2 == 0 for i in ids_], True, None, ["id"]), # same with bs=None
        (3, lambda ids_: [i % 2 == 0 for i in ids_], True, -1, ["id"]),  # same with bs<=0
],
)
def test_filtered_examples_iterable_input_columns(n, func, batched, batch_size, input_columns):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = FilteredExamplesIterable(
base_ex_iterable, func, batched=batched, batch_size=batch_size, input_columns=input_columns
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
columns_to_input = input_columns if isinstance(input_columns, list) else [input_columns]
if batched is False:
expected = [x for x in all_examples if func(*[x[col] for col in columns_to_input])]
else:
# For batched filter we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function
expected = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = _examples_to_batch(examples)
mask = func(*[batch[col] for col in columns_to_input])
expected.extend([x for x, to_keep in zip(examples, mask) if to_keep])
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
def test_skip_examples_iterable():
total, count = 10, 2
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": total})
skip_ex_iterable = SkipExamplesIterable(base_ex_iterable, n=count)
expected = list(generate_examples_fn(n=total))[count:]
assert list(skip_ex_iterable) == expected
assert (
skip_ex_iterable.shuffle_data_sources(np.random.default_rng(42)) is skip_ex_iterable
), "skip examples makes the shards order fixed"
def test_take_examples_iterable():
total, count = 10, 2
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": total})
take_ex_iterable = TakeExamplesIterable(base_ex_iterable, n=count)
expected = list(generate_examples_fn(n=total))[:count]
assert list(take_ex_iterable) == expected
assert (
take_ex_iterable.shuffle_data_sources(np.random.default_rng(42)) is take_ex_iterable
), "skip examples makes the shards order fixed"
def test_vertically_concatenated_examples_iterable():
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10})
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label": 5})
concatenated_ex_iterable = VerticallyConcatenatedMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2])
expected = [x for _, x in ex_iterable1] + [x for _, x in ex_iterable2]
assert [x for _, x in concatenated_ex_iterable] == expected
def test_vertically_concatenated_examples_iterable_with_different_columns():
    # having different columns is supported
    # (though iterable datasets fill the missing data with nulls)
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10})
ex_iterable2 = ExamplesIterable(generate_examples_fn, {})
concatenated_ex_iterable = VerticallyConcatenatedMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2])
expected = [x for _, x in ex_iterable1] + [x for _, x in ex_iterable2]
assert [x for _, x in concatenated_ex_iterable] == expected
def test_vertically_concatenated_examples_iterable_shuffle_data_sources():
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10})
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label": 5})
concatenated_ex_iterable = VerticallyConcatenatedMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2])
rng = np.random.default_rng(42)
shuffled_ex_iterable = concatenated_ex_iterable.shuffle_data_sources(rng)
# make sure the list of examples iterables is shuffled, and each examples iterable is shuffled
expected = [x for _, x in ex_iterable2.shuffle_data_sources(rng)] + [
x for _, x in ex_iterable1.shuffle_data_sources(rng)
]
assert [x for _, x in shuffled_ex_iterable] == expected
def test_horizontally_concatenated_examples_iterable():
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label1": 10})
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label2": 5})
concatenated_ex_iterable = HorizontallyConcatenatedMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2])
with pytest.raises(ValueError): # column "id" is duplicated -> raise an error
list(concatenated_ex_iterable)
ex_iterable2 = MappedExamplesIterable(ex_iterable2, lambda x: x, remove_columns=["id"])
concatenated_ex_iterable = HorizontallyConcatenatedMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2])
expected = [{**x, **y} for (_, x), (_, y) in zip(ex_iterable1, ex_iterable2)]
assert [x for _, x in concatenated_ex_iterable] == expected
assert (
concatenated_ex_iterable.shuffle_data_sources(np.random.default_rng(42)) is concatenated_ex_iterable
), "horizontally concatenated examples makes the shards order fixed"
@pytest.mark.parametrize(
"ex_iterable",
[
ExamplesIterable(generate_examples_fn, {}),
ShuffledDataSourcesExamplesIterable(generate_examples_fn, {}, np.random.default_rng(42)),
SelectColumnsIterable(ExamplesIterable(generate_examples_fn, {}), ["id"]),
StepExamplesIterable(ExamplesIterable(generate_examples_fn, {}), 2, 0),
CyclingMultiSourcesExamplesIterable([ExamplesIterable(generate_examples_fn, {})]),
VerticallyConcatenatedMultiSourcesExamplesIterable([ExamplesIterable(generate_examples_fn, {})]),
HorizontallyConcatenatedMultiSourcesExamplesIterable([ExamplesIterable(generate_examples_fn, {})]),
RandomlyCyclingMultiSourcesExamplesIterable(
[ExamplesIterable(generate_examples_fn, {})], np.random.default_rng(42)
),
MappedExamplesIterable(ExamplesIterable(generate_examples_fn, {}), lambda x: x),
MappedExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), lambda x: x),
FilteredExamplesIterable(ExamplesIterable(generate_examples_fn, {}), lambda x: True),
FilteredExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), lambda x: True),
BufferShuffledExamplesIterable(ExamplesIterable(generate_examples_fn, {}), 10, np.random.default_rng(42)),
SkipExamplesIterable(ExamplesIterable(generate_examples_fn, {}), 10),
TakeExamplesIterable(ExamplesIterable(generate_examples_fn, {}), 10),
TypedExamplesIterable(
ExamplesIterable(generate_examples_fn, {}), Features({"id": Value("int32")}), token_per_repo_id={}
),
],
)
def test_no_iter_arrow(ex_iterable: _BaseExamplesIterable):
assert ex_iterable.iter_arrow is None
@pytest.mark.parametrize(
"ex_iterable",
[
ArrowExamplesIterable(generate_tables_fn, {}),
ShuffledDataSourcesArrowExamplesIterable(generate_tables_fn, {}, np.random.default_rng(42)),
SelectColumnsIterable(ArrowExamplesIterable(generate_tables_fn, {}), ["id"]),
# StepExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), 2, 0), # not implemented
# CyclingMultiSourcesExamplesIterable([ArrowExamplesIterable(generate_tables_fn, {})]), # not implemented
VerticallyConcatenatedMultiSourcesExamplesIterable([ArrowExamplesIterable(generate_tables_fn, {})]),
# HorizontallyConcatenatedMultiSourcesExamplesIterable([ArrowExamplesIterable(generate_tables_fn, {})]), # not implemented
# RandomlyCyclingMultiSourcesExamplesIterable([ArrowExamplesIterable(generate_tables_fn, {})], np.random.default_rng(42)), # not implemented
MappedExamplesIterable(
ExamplesIterable(generate_examples_fn, {}), lambda t: t, formatting=FormattingConfig(format_type="arrow")
),
MappedExamplesIterable(
ArrowExamplesIterable(generate_tables_fn, {}),
lambda t: t,
formatting=FormattingConfig(format_type="arrow"),
),
FilteredExamplesIterable(
ExamplesIterable(generate_examples_fn, {}),
lambda t: True,
formatting=FormattingConfig(format_type="arrow"),
),
FilteredExamplesIterable(
ArrowExamplesIterable(generate_tables_fn, {}),
lambda t: True,
formatting=FormattingConfig(format_type="arrow"),
),
# BufferShuffledExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), 10, np.random.default_rng(42)), # not implemented
# SkipExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), 10), # not implemented
# TakeExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), 10), # not implemented
TypedExamplesIterable(
ArrowExamplesIterable(generate_tables_fn, {}), Features({"id": Value("int32")}), token_per_repo_id={}
),
],
)
def test_iter_arrow(ex_iterable: _BaseExamplesIterable):
assert ex_iterable.iter_arrow is not None
key, pa_table = next(ex_iterable.iter_arrow())
assert isinstance(pa_table, pa.Table)
############################
#
# IterableDataset tests
#
############################
def test_iterable_dataset():
dataset = IterableDataset(ExamplesIterable(generate_examples_fn, {}))
expected = [x for _, x in generate_examples_fn()]
assert next(iter(dataset)) == expected[0]
assert list(dataset) == expected
def test_iterable_dataset_from_generator():
data = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
def gen():
yield from data
dataset = IterableDataset.from_generator(gen)
assert isinstance(dataset, IterableDataset)
assert list(dataset) == data
def test_iterable_dataset_from_generator_with_shards():
def gen(shard_names):
for shard_name in shard_names:
for i in range(10):
yield {"shard_name": shard_name, "i": i}
shard_names = [f"data{shard_idx}.txt" for shard_idx in range(4)]
dataset = IterableDataset.from_generator(gen, gen_kwargs={"shard_names": shard_names})
assert isinstance(dataset, IterableDataset)
assert dataset.n_shards == len(shard_names)
def test_iterable_dataset_from_file(dataset: IterableDataset, arrow_file: str):
with assert_arrow_memory_doesnt_increase():
dataset_from_file = IterableDataset.from_file(arrow_file)
expected_features = dataset._resolve_features().features
assert dataset_from_file.features.type == expected_features.type
assert dataset_from_file.features == expected_features
assert isinstance(dataset_from_file, IterableDataset)
assert list(dataset_from_file) == list(dataset)
@require_not_windows
@require_dill_gt_0_3_2
@require_pyspark
def test_from_spark_streaming():
import pyspark
spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
data = [
("0", 0, 0.0),
("1", 1, 1.0),
("2", 2, 2.0),
("3", 3, 3.0),
]
df = spark.createDataFrame(data, "col_1: string, col_2: int, col_3: float")
dataset = IterableDataset.from_spark(df)
assert isinstance(dataset, IterableDataset)
results = []
for ex in dataset:
results.append(ex)
assert results == [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
@require_not_windows
@require_dill_gt_0_3_2
@require_pyspark
def test_from_spark_streaming_features():
import PIL.Image
import pyspark
spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
data = [(0, np.arange(4 * 4 * 3).reshape(4, 4, 3).tolist())]
df = spark.createDataFrame(data, "idx: int, image: array<array<array<int>>>")
features = Features({"idx": Value("int64"), "image": Image()})
dataset = IterableDataset.from_spark(
df,
features=features,
)
assert isinstance(dataset, IterableDataset)
results = []
for ex in dataset:
results.append(ex)
assert len(results) == 1
    assert isinstance(results[0]["image"], PIL.Image.Image)
@require_torch
def test_iterable_dataset_torch_integration():
ex_iterable = ExamplesIterable(generate_examples_fn, {})
dataset = IterableDataset(ex_iterable)
import torch.utils.data
assert isinstance(dataset, torch.utils.data.IterableDataset)
assert isinstance(dataset, IterableDataset)
assert dataset._ex_iterable is ex_iterable
@require_torch
def test_iterable_dataset_torch_picklable():
import pickle
ex_iterable = ExamplesIterable(generate_examples_fn, {})
dataset = IterableDataset(ex_iterable, formatting=FormattingConfig(format_type="torch"))
reloaded_dataset = pickle.loads(pickle.dumps(dataset))
import torch.utils.data
assert isinstance(reloaded_dataset, IterableDataset)
assert isinstance(reloaded_dataset, torch.utils.data.IterableDataset)
assert reloaded_dataset._formatting.format_type == "torch"
assert len(list(dataset)) == len(list(reloaded_dataset))
@require_torch
def test_iterable_dataset_with_format_torch():
ex_iterable = ExamplesIterable(generate_examples_fn, {})
dataset = IterableDataset(ex_iterable)
from torch.utils.data import DataLoader
dataloader = DataLoader(dataset)
assert len(list(dataloader)) == len(list(ex_iterable))
@require_torch
def test_iterable_dataset_torch_dataloader_parallel():
from torch.utils.data import DataLoader
ex_iterable = ExamplesIterable(generate_examples_fn, {})
dataset = IterableDataset(ex_iterable)
dataloader = DataLoader(dataset, num_workers=2, batch_size=None)
result = list(dataloader)
expected = [example for _, example in ex_iterable]
assert len(result) == len(expected)
assert {str(x) for x in result} == {str(x) for x in expected}
@require_torch
@pytest.mark.filterwarnings("ignore:This DataLoader will create:UserWarning")
@pytest.mark.parametrize("n_shards, num_workers", [(2, 1), (2, 2), (3, 2), (2, 3)])
def test_sharded_iterable_dataset_torch_dataloader_parallel(n_shards, num_workers):
from torch.utils.data import DataLoader
ex_iterable = ExamplesIterable(generate_examples_fn, {"filepaths": [f"{i}.txt" for i in range(n_shards)]})
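    # each DataLoader worker streams a distinct subset of the shards; with more workers than shards,
    # the extra workers simply yield nothing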
dataset = IterableDataset(ex_iterable)
dataloader = DataLoader(dataset, batch_size=None, num_workers=num_workers)
result = list(dataloader)
expected = [example for _, example in ex_iterable]
assert len(result) == len(expected)
assert {str(x) for x in result} == {str(x) for x in expected}
@require_torch
@pytest.mark.integration
@pytest.mark.parametrize("num_workers", [1, 2])
def test_iterable_dataset_from_hub_torch_dataloader_parallel(num_workers, tmp_path):
from torch.utils.data import DataLoader
dataset = load_dataset(SAMPLE_DATASET_IDENTIFIER, cache_dir=str(tmp_path), streaming=True, split="train")
dataloader = DataLoader(dataset, batch_size=None, num_workers=num_workers)
result = list(dataloader)
assert len(result) == 2
@pytest.mark.parametrize("batch_size", [4, 5])
@pytest.mark.parametrize("drop_last_batch", [False, True])
def test_iterable_dataset_iter_batch(batch_size, drop_last_batch):
n = 25
dataset = IterableDataset(ExamplesIterable(generate_examples_fn, {"n": n}))
all_examples = [ex for _, ex in generate_examples_fn(n=n)]
expected = []
for i in range(0, len(all_examples), batch_size):
if len(all_examples[i : i + batch_size]) < batch_size and drop_last_batch:
continue
expected.append(_examples_to_batch(all_examples[i : i + batch_size]))
assert next(iter(dataset.iter(batch_size, drop_last_batch=drop_last_batch))) == expected[0]
assert list(dataset.iter(batch_size, drop_last_batch=drop_last_batch)) == expected
def test_iterable_dataset_info():
info = DatasetInfo(description="desc", citation="@article{}", size_in_bytes=42)
ex_iterable = ExamplesIterable(generate_examples_fn, {})
dataset = IterableDataset(ex_iterable, info=info)
assert dataset.info == info
assert dataset.description == info.description
assert dataset.citation == info.citation
assert dataset.size_in_bytes == info.size_in_bytes
def test_iterable_dataset_set_epoch(dataset: IterableDataset):
assert dataset._epoch == 0
dataset.set_epoch(42)
assert dataset._epoch == 42
@pytest.mark.parametrize("seed", [None, 42, 1337])
@pytest.mark.parametrize("epoch", [None, 0, 1, 10])
def test_iterable_dataset_set_epoch_of_shuffled_dataset(dataset: IterableDataset, seed, epoch):
buffer_size = 10
shuffled_dataset = dataset.shuffle(seed, buffer_size=buffer_size)
base_generator = shuffled_dataset._shuffling.generator
if epoch is not None:
shuffled_dataset.set_epoch(epoch)
effective_generator = shuffled_dataset._effective_generator()
assert effective_generator is not None
if epoch is None or epoch == 0:
assert is_rng_equal(base_generator, shuffled_dataset._effective_generator())
else:
assert not is_rng_equal(base_generator, shuffled_dataset._effective_generator())
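        # the expected effective seed is a value drawn from a copy of the base generator minus the
        # epoch, so each epoch yields a different (but deterministic) shuffle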
effective_seed = deepcopy(base_generator).integers(0, 1 << 63) - epoch
assert is_rng_equal(np.random.default_rng(effective_seed), shuffled_dataset._effective_generator())
def test_iterable_dataset_map(
dataset: IterableDataset,
):
func = lambda x: {"id+1": x["id"] + 1} # noqa: E731
mapped_dataset = dataset.map(func)
assert isinstance(mapped_dataset._ex_iterable, MappedExamplesIterable)
assert mapped_dataset._ex_iterable.function is func
assert mapped_dataset._ex_iterable.batched is False
assert next(iter(mapped_dataset)) == {**next(iter(dataset)), **func(next(iter(generate_examples_fn()))[1])}
def test_iterable_dataset_map_batched(
dataset: IterableDataset,
):
func = lambda x: {"id+1": [i + 1 for i in x["id"]]} # noqa: E731
batch_size = 3
dataset = dataset.map(func, batched=True, batch_size=batch_size)
assert isinstance(dataset._ex_iterable, MappedExamplesIterable)
assert dataset._ex_iterable.function is func
assert dataset._ex_iterable.batch_size == batch_size
assert next(iter(dataset)) == {"id": 0, "id+1": 1}
def test_iterable_dataset_map_complex_features(
dataset: IterableDataset,
):
# https://github.com/huggingface/datasets/issues/3505
ex_iterable = ExamplesIterable(generate_examples_fn, {"label": "positive"})
features = Features(
{
"id": Value("int64"),
"label": Value("string"),
}
)
dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features))
dataset = dataset.cast_column("label", ClassLabel(names=["negative", "positive"]))
dataset = dataset.map(lambda x: {"id+1": x["id"] + 1, **x})
assert isinstance(dataset._ex_iterable, MappedExamplesIterable)
features["label"] = ClassLabel(names=["negative", "positive"])
assert [{k: v for k, v in ex.items() if k != "id+1"} for ex in dataset] == [
features.encode_example(ex) for _, ex in ex_iterable
]
def test_iterable_dataset_map_with_features(dataset: IterableDataset) -> None:
# https://github.com/huggingface/datasets/issues/3888
ex_iterable = ExamplesIterable(generate_examples_fn, {"label": "positive"})
features_before_map = Features(
{
"id": Value("int64"),
"label": Value("string"),
}
)
dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features_before_map))
assert dataset.info.features is not None
assert dataset.info.features == features_before_map
features_after_map = Features(
{
"id": Value("int64"),
"label": Value("string"),
"target": Value("string"),
}
)
dataset = dataset.map(lambda x: {"target": x["label"]}, features=features_after_map)
assert dataset.info.features is not None
assert dataset.info.features == features_after_map
def test_iterable_dataset_map_with_fn_kwargs(dataset: IterableDataset) -> None:
fn_kwargs = {"y": 1}
mapped_dataset = dataset.map(lambda x, y: {"id+y": x["id"] + y}, fn_kwargs=fn_kwargs)
assert mapped_dataset._ex_iterable.batched is False
assert next(iter(mapped_dataset)) == {"id": 0, "id+y": 1}
batch_size = 3
mapped_dataset = dataset.map(
lambda x, y: {"id+y": [i + y for i in x["id"]]}, batched=True, batch_size=batch_size, fn_kwargs=fn_kwargs
)
assert isinstance(mapped_dataset._ex_iterable, MappedExamplesIterable)
assert mapped_dataset._ex_iterable.batch_size == batch_size
assert next(iter(mapped_dataset)) == {"id": 0, "id+y": 1}
def test_iterable_dataset_filter(dataset: IterableDataset) -> None:
fn_kwargs = {"y": 1}
filtered_dataset = dataset.filter(lambda x, y: x["id"] == y, fn_kwargs=fn_kwargs)
assert filtered_dataset._ex_iterable.batched is False
assert next(iter(filtered_dataset)) == {"id": 1}
@pytest.mark.parametrize("seed", [42, 1337, 101010, 123456])
@pytest.mark.parametrize("epoch", [None, 0, 1])
def test_iterable_dataset_shuffle(dataset: IterableDataset, seed, epoch):
buffer_size = 3
dataset = deepcopy(dataset)
dataset._ex_iterable.kwargs["filepaths"] = ["0.txt", "1.txt"]
dataset = dataset.shuffle(seed, buffer_size=buffer_size)
assert isinstance(dataset._shuffling, ShufflingConfig)
assert isinstance(dataset._shuffling.generator, np.random.Generator)
assert is_rng_equal(dataset._shuffling.generator, np.random.default_rng(seed))
    # The effective seed combines the base seed and the epoch: a value drawn from the seeded
    # generator, offset by the epoch
if epoch is None or epoch == 0:
effective_seed = seed
else:
dataset.set_epoch(epoch)
effective_seed = np.random.default_rng(seed).integers(0, 1 << 63) - epoch
# Shuffling adds a shuffle buffer
expected_first_example_index = next(
iter(BufferShuffledExamplesIterable._iter_random_indices(np.random.default_rng(effective_seed), buffer_size))
)
assert isinstance(dataset._ex_iterable, BufferShuffledExamplesIterable)
# It also shuffles the underlying examples iterable
expected_ex_iterable = ExamplesIterable(
generate_examples_fn, {"filepaths": ["0.txt", "1.txt"]}
).shuffle_data_sources(np.random.default_rng(effective_seed))
assert isinstance(dataset._ex_iterable.ex_iterable, ExamplesIterable)
assert next(iter(dataset)) == list(islice(expected_ex_iterable, expected_first_example_index + 1))[-1][1]
@pytest.mark.parametrize(
"features",
[
None,
Features(
{
"id": Value("int64"),
"label": Value("int64"),
}
),
Features(
{
"id": Value("int64"),
"label": ClassLabel(names=["negative", "positive"]),
}
),
],
)
def test_iterable_dataset_features(features):
ex_iterable = ExamplesIterable(generate_examples_fn, {"label": 0})
dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features))
if features:
expected = [features.encode_example(x) for _, x in ex_iterable]
else:
expected = [x for _, x in ex_iterable]
assert list(dataset) == expected
def test_iterable_dataset_features_cast_to_python():
ex_iterable = ExamplesIterable(
generate_examples_fn, {"timestamp": pd.Timestamp(2020, 1, 1), "array": np.ones(5), "n": 1}
)
features = Features(
{
"id": Value("int64"),
"timestamp": Value("timestamp[us]"),
"array": [Value("int64")],
}
)
dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features))
assert list(dataset) == [{"timestamp": pd.Timestamp(2020, 1, 1).to_pydatetime(), "array": [1] * 5, "id": 0}]
@pytest.mark.parametrize("format_type", [None, "torch", "python", "tf", "tensorflow", "np", "numpy", "jax"])
def test_iterable_dataset_with_format(dataset: IterableDataset, format_type):
formatted_dataset = dataset.with_format(format_type)
assert formatted_dataset._formatting.format_type == get_format_type_from_alias(format_type)
@require_torch
def test_iterable_dataset_is_torch_iterable_dataset(dataset: IterableDataset):
from torch.utils.data import DataLoader, _DatasetKind
dataloader = DataLoader(dataset)
assert dataloader._dataset_kind == _DatasetKind.Iterable
out = list(dataloader)
assert len(out) == DEFAULT_N_EXAMPLES
@pytest.mark.parametrize("n", [0, 2, int(1e10)])
def test_iterable_dataset_skip(dataset: IterableDataset, n):
skip_dataset = dataset.skip(n)
assert isinstance(skip_dataset._ex_iterable, SkipExamplesIterable)
assert skip_dataset._ex_iterable.n == n
assert list(skip_dataset) == list(dataset)[n:]
@pytest.mark.parametrize("n", [0, 2, int(1e10)])
def test_iterable_dataset_take(dataset: IterableDataset, n):
take_dataset = dataset.take(n)
assert isinstance(take_dataset._ex_iterable, TakeExamplesIterable)
assert take_dataset._ex_iterable.n == n
assert list(take_dataset) == list(dataset)[:n]
@pytest.mark.parametrize("method", ["skip", "take"])
def test_iterable_dataset_shuffle_after_skip_or_take(method):
seed = 42
n, n_shards = 3, 10
count = 7
ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n, "filepaths": [f"{i}.txt" for i in range(n_shards)]})
dataset = IterableDataset(ex_iterable)
dataset = dataset.skip(n) if method == "skip" else dataset.take(count)
shuffled_dataset = dataset.shuffle(seed, buffer_size=DEFAULT_N_EXAMPLES)
    # shuffling a skip/take dataset should keep the same examples and not shuffle the shards
key = lambda x: f"{x['filepath']}_{x['id']}" # noqa: E731
assert sorted(dataset, key=key) == sorted(shuffled_dataset, key=key)
def test_iterable_dataset_add_column(dataset_with_several_columns):
new_column = list(range(DEFAULT_N_EXAMPLES))
new_dataset = dataset_with_several_columns.add_column("new_column", new_column)
assert list(new_dataset) == [
{**example, "new_column": idx} for idx, example in enumerate(dataset_with_several_columns)
]
new_dataset = new_dataset._resolve_features()
assert "new_column" in new_dataset.column_names
def test_iterable_dataset_rename_column(dataset_with_several_columns):
new_dataset = dataset_with_several_columns.rename_column("id", "new_id")
assert list(new_dataset) == [
{("new_id" if k == "id" else k): v for k, v in example.items()} for example in dataset_with_several_columns
]
assert new_dataset.features is None
assert new_dataset.column_names is None
# rename the column if ds.features was not None
new_dataset = dataset_with_several_columns._resolve_features().rename_column("id", "new_id")
assert new_dataset.features is not None
assert new_dataset.column_names is not None
assert "id" not in new_dataset.column_names
assert "new_id" in new_dataset.column_names
def test_iterable_dataset_rename_columns(dataset_with_several_columns):
column_mapping = {"id": "new_id", "filepath": "filename"}
new_dataset = dataset_with_several_columns.rename_columns(column_mapping)
assert list(new_dataset) == [
{column_mapping.get(k, k): v for k, v in example.items()} for example in dataset_with_several_columns
]
assert new_dataset.features is None
assert new_dataset.column_names is None
# rename the columns if ds.features was not None
new_dataset = dataset_with_several_columns._resolve_features().rename_columns(column_mapping)
assert new_dataset.features is not None
assert new_dataset.column_names is not None
assert all(c not in new_dataset.column_names for c in ["id", "filepath"])
assert all(c in new_dataset.column_names for c in ["new_id", "filename"])
def test_iterable_dataset_remove_columns(dataset_with_several_columns):
new_dataset = dataset_with_several_columns.remove_columns("id")
assert list(new_dataset) == [
{k: v for k, v in example.items() if k != "id"} for example in dataset_with_several_columns
]
assert new_dataset.features is None
new_dataset = dataset_with_several_columns.remove_columns(["id", "filepath"])
assert list(new_dataset) == [
{k: v for k, v in example.items() if k != "id" and k != "filepath"} for example in dataset_with_several_columns
]
assert new_dataset.features is None
assert new_dataset.column_names is None
# remove the columns if ds.features was not None
new_dataset = dataset_with_several_columns._resolve_features().remove_columns(["id", "filepath"])
assert new_dataset.features is not None
assert new_dataset.column_names is not None
assert all(c not in new_dataset.features for c in ["id", "filepath"])
assert all(c not in new_dataset.column_names for c in ["id", "filepath"])
def test_iterable_dataset_select_columns(dataset_with_several_columns):
new_dataset = dataset_with_several_columns.select_columns("id")
assert list(new_dataset) == [
{k: v for k, v in example.items() if k == "id"} for example in dataset_with_several_columns
]
assert new_dataset.features is None
new_dataset = dataset_with_several_columns.select_columns(["id", "filepath"])
assert list(new_dataset) == [
{k: v for k, v in example.items() if k in ("id", "filepath")} for example in dataset_with_several_columns
]
assert new_dataset.features is None
# select the columns if ds.features was not None
new_dataset = dataset_with_several_columns._resolve_features().select_columns(["id", "filepath"])
assert new_dataset.features is not None
assert new_dataset.column_names is not None
assert all(c in new_dataset.features for c in ["id", "filepath"])
assert all(c in new_dataset.column_names for c in ["id", "filepath"])
def test_iterable_dataset_cast_column():
ex_iterable = ExamplesIterable(generate_examples_fn, {"label": 10})
features = Features({"id": Value("int64"), "label": Value("int64")})
dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features))
casted_dataset = dataset.cast_column("label", Value("bool"))
casted_features = features.copy()
casted_features["label"] = Value("bool")
assert list(casted_dataset) == [casted_features.encode_example(ex) for _, ex in ex_iterable]
def test_iterable_dataset_cast():
ex_iterable = ExamplesIterable(generate_examples_fn, {"label": 10})
features = Features({"id": Value("int64"), "label": Value("int64")})
dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features))
new_features = Features({"id": Value("int64"), "label": Value("bool")})
casted_dataset = dataset.cast(new_features)
assert list(casted_dataset) == [new_features.encode_example(ex) for _, ex in ex_iterable]
def test_iterable_dataset_resolve_features():
ex_iterable = ExamplesIterable(generate_examples_fn, {})
dataset = IterableDataset(ex_iterable)
assert dataset.features is None
assert dataset.column_names is None
dataset = dataset._resolve_features()
assert dataset.features == Features(
{
"id": Value("int64"),
}
)
assert dataset.column_names == ["id"]
def test_iterable_dataset_resolve_features_keep_order():
def gen():
yield from zip(range(3), [{"a": 1}, {"c": 1}, {"b": 1}])
ex_iterable = ExamplesIterable(gen, {})
dataset = IterableDataset(ex_iterable)._resolve_features()
# columns appear in order of appearance in the dataset
assert list(dataset.features) == ["a", "c", "b"]
assert dataset.column_names == ["a", "c", "b"]
def test_iterable_dataset_with_features_fill_with_none():
def gen():
yield from zip(range(2), [{"a": 1}, {"b": 1}])
ex_iterable = ExamplesIterable(gen, {})
info = DatasetInfo(features=Features({"a": Value("int32"), "b": Value("int32")}))
dataset = IterableDataset(ex_iterable, info=info)
assert list(dataset) == [{"a": 1, "b": None}, {"b": 1, "a": None}]
def test_concatenate_datasets():
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10})
dataset1 = IterableDataset(ex_iterable1)
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label": 5})
dataset2 = IterableDataset(ex_iterable2)
concatenated_dataset = concatenate_datasets([dataset1, dataset2])
assert list(concatenated_dataset) == list(dataset1) + list(dataset2)
def test_concatenate_datasets_resolves_features():
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10})
dataset1 = IterableDataset(ex_iterable1)
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label": 5})
dataset2 = IterableDataset(ex_iterable2)
concatenated_dataset = concatenate_datasets([dataset1, dataset2])
assert concatenated_dataset.features is not None
assert sorted(concatenated_dataset.features) == ["id", "label"]
def test_concatenate_datasets_with_different_columns():
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10})
dataset1 = IterableDataset(ex_iterable1)
ex_iterable2 = ExamplesIterable(generate_examples_fn, {})
dataset2 = IterableDataset(ex_iterable2)
# missing column "label" -> it should be replaced with nulls
extended_dataset2_list = [{"label": None, **x} for x in dataset2]
concatenated_dataset = concatenate_datasets([dataset1, dataset2])
assert list(concatenated_dataset) == list(dataset1) + extended_dataset2_list
# change order
concatenated_dataset = concatenate_datasets([dataset2, dataset1])
assert list(concatenated_dataset) == extended_dataset2_list + list(dataset1)
def test_concatenate_datasets_axis_1():
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label1": 10})
dataset1 = IterableDataset(ex_iterable1)
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label2": 5})
dataset2 = IterableDataset(ex_iterable2)
with pytest.raises(ValueError): # column "id" is duplicated -> raise an error
concatenate_datasets([dataset1, dataset2], axis=1)
concatenated_dataset = concatenate_datasets([dataset1, dataset2.remove_columns("id")], axis=1)
assert list(concatenated_dataset) == [{**x, **y} for x, y in zip(dataset1, dataset2)]
def test_concatenate_datasets_axis_1_resolves_features():
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label1": 10})
dataset1 = IterableDataset(ex_iterable1)
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label2": 5})
dataset2 = IterableDataset(ex_iterable2).remove_columns("id")
concatenated_dataset = concatenate_datasets([dataset1, dataset2], axis=1)
assert concatenated_dataset.features is not None
assert sorted(concatenated_dataset.features) == ["id", "label1", "label2"]
def test_concatenate_datasets_axis_1_with_different_lengths():
n1 = 10
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label1": 10, "n": n1})
dataset1 = IterableDataset(ex_iterable1)
n2 = 5
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label2": 5, "n": n2})
dataset2 = IterableDataset(ex_iterable2).remove_columns("id")
# missing rows -> they should be replaced with nulls
extended_dataset2_list = list(dataset2) + [{"label2": None}] * (n1 - n2)
concatenated_dataset = concatenate_datasets([dataset1, dataset2], axis=1)
assert list(concatenated_dataset) == [{**x, **y} for x, y in zip(dataset1, extended_dataset2_list)]
# change order
concatenated_dataset = concatenate_datasets([dataset2, dataset1], axis=1)
assert list(concatenated_dataset) == [{**x, **y} for x, y in zip(extended_dataset2_list, dataset1)]
@pytest.mark.parametrize(
"probas, seed, expected_length, stopping_strategy",
[
(None, None, 3 * (DEFAULT_N_EXAMPLES - 1) + 1, "first_exhausted"),
([1, 0, 0], None, DEFAULT_N_EXAMPLES, "first_exhausted"),
([0, 1, 0], None, DEFAULT_N_EXAMPLES, "first_exhausted"),
([0.2, 0.5, 0.3], 42, None, "first_exhausted"),
([0.1, 0.1, 0.8], 1337, None, "first_exhausted"),
([0.5, 0.2, 0.3], 101010, None, "first_exhausted"),
(None, None, 3 * DEFAULT_N_EXAMPLES, "all_exhausted"),
([0.2, 0.5, 0.3], 42, None, "all_exhausted"),
([0.1, 0.1, 0.8], 1337, None, "all_exhausted"),
([0.5, 0.2, 0.3], 101010, None, "all_exhausted"),
],
)
def test_interleave_datasets(dataset: IterableDataset, probas, seed, expected_length, stopping_strategy):
d1 = dataset
d2 = dataset.map(lambda x: {"id+1": x["id"] + 1, **x})
d3 = dataset.with_format("python")
datasets = [d1, d2, d3]
merged_dataset = interleave_datasets(
datasets, probabilities=probas, seed=seed, stopping_strategy=stopping_strategy
)
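    # interleaving datasets with different columns fills the missing values with None in each example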
def fill_default(example):
return {"id": None, "id+1": None, **example}
# Check the examples iterable
assert isinstance(
merged_dataset._ex_iterable, (CyclingMultiSourcesExamplesIterable, RandomlyCyclingMultiSourcesExamplesIterable)
)
# Check that it is deterministic
if seed is not None:
merged_dataset2 = interleave_datasets(
[d1, d2, d3], probabilities=probas, seed=seed, stopping_strategy=stopping_strategy
)
assert list(merged_dataset) == list(merged_dataset2)
# Check features
assert merged_dataset.features == Features({"id": Value("int64"), "id+1": Value("int64")})
# Check first example
if seed is not None:
rng = np.random.default_rng(seed)
i = next(iter(RandomlyCyclingMultiSourcesExamplesIterable._iter_random_indices(rng, len(datasets), p=probas)))
assert next(iter(merged_dataset)) == fill_default(next(iter(datasets[i])))
else:
assert any(next(iter(merged_dataset)) == fill_default(next(iter(dataset))) for dataset in datasets)
    # Compute the expected length in case it's random
if expected_length is None:
expected_length = 0
counts = np.array([len(list(d)) for d in datasets])
bool_strategy_func = np.all if stopping_strategy == "all_exhausted" else np.any
rng = np.random.default_rng(seed)
for i in RandomlyCyclingMultiSourcesExamplesIterable._iter_random_indices(rng, len(datasets), p=probas):
counts[i] -= 1
expected_length += 1
if bool_strategy_func(counts <= 0):
break
# Check length
assert len(list(merged_dataset)) == expected_length
def test_interleave_datasets_with_features(
dataset: IterableDataset,
):
features = Features(
{
"id": Value("int64"),
"label": ClassLabel(names=["negative", "positive"]),
}
)
ex_iterable = ExamplesIterable(generate_examples_fn, {"label": 0})
dataset_with_features = IterableDataset(ex_iterable, info=DatasetInfo(features=features))
merged_dataset = interleave_datasets([dataset, dataset_with_features])
assert merged_dataset.features == features
def test_interleave_datasets_with_oversampling():
# Test hardcoded results
d1 = IterableDataset(ExamplesIterable((lambda: (yield from [(i, {"a": i}) for i in [0, 1, 2]])), {}))
d2 = IterableDataset(ExamplesIterable((lambda: (yield from [(i, {"a": i}) for i in [10, 11, 12, 13]])), {}))
d3 = IterableDataset(ExamplesIterable((lambda: (yield from [(i, {"a": i}) for i in [20, 21, 22, 23, 24]])), {}))
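    # with "all_exhausted", the sources are cycled round-robin and the shorter ones restart from the
    # beginning, so iteration only stops once the longest source (d3, 5 examples) has been fully seen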
expected_values = [0, 10, 20, 1, 11, 21, 2, 12, 22, 0, 13, 23, 1, 10, 24]
# Check oversampling strategy without probabilities
assert [x["a"] for x in interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted")] == expected_values
# Check oversampling strategy with probabilities
expected_values = [20, 0, 21, 10, 1, 22, 23, 24, 2, 0, 1, 20, 11, 21, 2, 0, 12, 1, 22, 13]
values = [
x["a"]
for x in interleave_datasets(
[d1, d2, d3], probabilities=[0.5, 0.2, 0.3], seed=42, stopping_strategy="all_exhausted"
)
]
assert values == expected_values
@require_torch
def test_with_format_torch(dataset_with_several_columns: IterableDataset):
import torch
dset = dataset_with_several_columns.with_format(type="torch")
example = next(iter(dset))
batch = next(iter(dset.iter(batch_size=3)))
assert len(example) == 3
assert isinstance(example["id"], torch.Tensor)
assert list(example["id"].shape) == []
assert example["id"].item() == 0
assert isinstance(batch["id"], torch.Tensor)
assert isinstance(example["filepath"], list)
assert isinstance(example["filepath"][0], str)
assert example["filepath"][0] == "data0.txt"
assert isinstance(batch["filepath"], list)
assert isinstance(example["metadata"], dict)
assert isinstance(example["metadata"]["sources"], list)
assert isinstance(example["metadata"]["sources"][0], str)
assert isinstance(batch["metadata"], list)
@require_tf
def test_with_format_tf(dataset_with_several_columns: IterableDataset):
import tensorflow as tf
dset = dataset_with_several_columns.with_format(type="tensorflow")
example = next(iter(dset))
batch = next(iter(dset.iter(batch_size=3)))
assert isinstance(example["id"], tf.Tensor)
assert list(example["id"].shape) == []
assert example["id"].numpy().item() == 0
assert isinstance(batch["id"], tf.Tensor)
assert isinstance(example["filepath"], tf.Tensor)
assert example["filepath"][0] == b"data0.txt"
assert isinstance(batch["filepath"], tf.Tensor)
assert isinstance(example["metadata"], dict)
assert isinstance(example["metadata"]["sources"], tf.Tensor)
assert isinstance(batch["metadata"], list)
def test_map_array_are_not_converted_back_to_lists(dataset: IterableDataset):
def func(example):
return {"array": np.array([1, 2, 3])}
dset_test = dataset.map(func)
example = next(iter(dset_test))
# not aligned with Dataset.map because we don't convert back to lists after map()
assert isinstance(example["array"], np.ndarray)
def test_formatted_map(dataset: IterableDataset):
dataset = dataset.with_format("np")
assert isinstance(next(dataset.iter(batch_size=3))["id"], np.ndarray)
dataset = dataset.with_format(None)
assert isinstance(next(dataset.iter(batch_size=3))["id"], list)
def add_one_numpy(example):
assert isinstance(example["id"], np.ndarray)
return {"id": example["id"] + 1}
dataset = dataset.with_format("np")
dataset = dataset.map(add_one_numpy, batched=True)
assert isinstance(next(dataset.iter(batch_size=3))["id"], np.ndarray)
dataset = dataset.with_format(None)
assert isinstance(next(dataset.iter(batch_size=3))["id"], list)
@pytest.mark.parametrize("n_shards1, n_shards2, num_workers", [(2, 1, 1), (2, 2, 2), (1, 3, 1), (4, 3, 3)])
def test_interleave_dataset_with_sharding(n_shards1, n_shards2, num_workers):
from torch.utils.data import DataLoader
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"filepaths": [f"{i}-1.txt" for i in range(n_shards1)]})
dataset1 = IterableDataset(ex_iterable1).with_format("torch")
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"filepaths": [f"{i}-2.txt" for i in range(n_shards2)]})
dataset2 = IterableDataset(ex_iterable2).with_format("torch")
dataset_merged = interleave_datasets([dataset1, dataset2], stopping_strategy="first_exhausted")
assert dataset_merged.n_shards == min(n_shards1, n_shards2)
dataloader = DataLoader(dataset_merged, batch_size=None, num_workers=num_workers)
result = list(dataloader)
expected_length = 2 * min(
len([example for _, example in ex_iterable1]), len([example for _, example in ex_iterable2])
)
# some samples may be missing because the stopping strategy is applied per process
assert expected_length - num_workers <= len(result) <= expected_length
assert len(result) == len({str(x) for x in result})
def filter_func(batch):
return batch["id"] == 4
def map_func(batch):
batch["id"] *= 2
return batch
def test_pickle_after_many_transforms(dataset_with_several_columns):
dataset = dataset_with_several_columns
dataset = dataset.remove_columns(["filepath"])
dataset = dataset.take(5)
dataset = dataset.map(map_func)
dataset = dataset.shuffle()
dataset = dataset.skip(1)
dataset = dataset.filter(filter_func)
dataset = dataset.add_column("additional_col", ["something"])
dataset = dataset.rename_column("metadata", "metadata1")
dataset = dataset.rename_columns({"id": "id1", "metadata1": "metadata2"})
dataset = dataset.select_columns(["id1", "additional_col"])
unpickled_dataset = pickle.loads(pickle.dumps(dataset))
assert list(unpickled_dataset) == list(dataset)
| datasets/tests/test_iterable_dataset.py/0 | {
"file_path": "datasets/tests/test_iterable_dataset.py",
"repo_id": "datasets",
"token_count": 36328
} | 84 |
import unittest
from unittest.mock import patch
import pytest
from pytest import CaptureFixture
from datasets.utils import (
are_progress_bars_disabled,
disable_progress_bars,
enable_progress_bars,
tqdm,
)
class TestTqdmUtils(unittest.TestCase):
@pytest.fixture(autouse=True)
def capsys(self, capsys: CaptureFixture) -> None:
"""Workaround to make capsys work in unittest framework.
Capsys is a convenient pytest fixture to capture stdout.
See https://waylonwalker.com/pytest-capsys/.
Taken from https://github.com/pytest-dev/pytest/issues/2504#issuecomment-309475790.
"""
self.capsys = capsys
def setUp(self) -> None:
"""Get verbosity to set it back after the tests."""
self._previous_are_progress_bars_disabled = are_progress_bars_disabled()
return super().setUp()
def tearDown(self) -> None:
"""Set back progress bars verbosity as before testing."""
if self._previous_are_progress_bars_disabled:
disable_progress_bars()
else:
enable_progress_bars()
@patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", None)
def test_tqdm_helpers(self) -> None:
"""Test helpers to enable/disable progress bars."""
disable_progress_bars()
self.assertTrue(are_progress_bars_disabled())
enable_progress_bars()
self.assertFalse(are_progress_bars_disabled())
@patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", True)
def test_cannot_enable_tqdm_when_env_variable_is_set(self) -> None:
"""
Test helpers cannot enable/disable progress bars when
`HF_DATASETS_DISABLE_PROGRESS_BARS` is set.
"""
disable_progress_bars()
self.assertTrue(are_progress_bars_disabled())
with self.assertWarns(UserWarning):
enable_progress_bars()
self.assertTrue(are_progress_bars_disabled()) # Still disabled !
@patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", False)
def test_cannot_disable_tqdm_when_env_variable_is_set(self) -> None:
"""
Test helpers cannot enable/disable progress bars when
`HF_DATASETS_DISABLE_PROGRESS_BARS` is set.
"""
enable_progress_bars()
self.assertFalse(are_progress_bars_disabled())
with self.assertWarns(UserWarning):
disable_progress_bars()
self.assertFalse(are_progress_bars_disabled()) # Still enabled !
@patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", None)
def test_tqdm_disabled(self) -> None:
"""Test TQDM not outputting anything when globally disabled."""
disable_progress_bars()
for _ in tqdm(range(10)):
pass
captured = self.capsys.readouterr()
self.assertEqual(captured.out, "")
self.assertEqual(captured.err, "")
@patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", None)
def test_tqdm_disabled_cannot_be_forced(self) -> None:
"""Test TQDM cannot be forced when globally disabled."""
disable_progress_bars()
for _ in tqdm(range(10), disable=False):
pass
captured = self.capsys.readouterr()
self.assertEqual(captured.out, "")
self.assertEqual(captured.err, "")
@patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", None)
def test_tqdm_can_be_disabled_when_globally_enabled(self) -> None:
"""Test TQDM can still be locally disabled even when globally enabled."""
enable_progress_bars()
for _ in tqdm(range(10), disable=True):
pass
captured = self.capsys.readouterr()
self.assertEqual(captured.out, "")
self.assertEqual(captured.err, "")
@patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", None)
def test_tqdm_enabled(self) -> None:
"""Test TQDM work normally when globally enabled."""
enable_progress_bars()
for _ in tqdm(range(10)):
pass
captured = self.capsys.readouterr()
self.assertEqual(captured.out, "")
self.assertIn("10/10", captured.err) # tqdm log
| datasets/tests/test_tqdm.py/0 | {
"file_path": "datasets/tests/test_tqdm.py",
"repo_id": "datasets",
"token_count": 1804
} | 85 |
<jupyter_start><jupyter_text>Unit 4: Code your first Deep Reinforcement Learning Algorithm with PyTorch: Reinforce. And test its robustness 💪In this notebook, you'll code your first Deep Reinforcement Learning algorithm from scratch: Reinforce (also called Monte Carlo Policy Gradient).Reinforce is a *Policy-based method*: a Deep Reinforcement Learning algorithm that tries **to optimize the policy directly without using an action-value function**.More precisely, Reinforce is a *Policy-gradient method*, a subclass of *Policy-based methods* that aims **to optimize the policy directly by estimating the weights of the optimal policy using gradient ascent**.To test its robustness, we're going to train it in 2 different simple environments:- Cartpole-v1- PixelcopterEnv⬇️ Here is an example of what **you will achieve at the end of this notebook.** ⬇️ 🎮 Environments: - [CartPole-v1](https://www.gymlibrary.dev/environments/classic_control/cart_pole/)- [PixelCopter](https://pygame-learning-environment.readthedocs.io/en/latest/user/games/pixelcopter.html) 📚 RL-Library: - Python- PyTorchWe're constantly trying to improve our tutorials, so **if you find some issues in this notebook**, please [open an issue on the GitHub Repo](https://github.com/huggingface/deep-rl-class/issues). Objectives of this notebook 🏆At the end of the notebook, you will:- Be able to **code from scratch a Reinforce algorithm using PyTorch.**- Be able to **test the robustness of your agent using simple environments.**- Be able to **push your trained agent to the Hub** with a nice video replay and an evaluation score 🔥. This notebook is from the Deep Reinforcement Learning Course In this free course, you will:- 📖 Study Deep Reinforcement Learning in **theory and practice**.- 🧑💻 Learn to **use famous Deep RL libraries** such as Stable Baselines3, RL Baselines3 Zoo, CleanRL and Sample Factory 2.0.- 🤖 Train **agents in unique environments** And more check 📚 the syllabus 👉 https://simoninithomas.github.io/deep-rl-courseDon’t forget to **sign up to the course** (we are collecting your email to be able to **send you the links when each Unit is published and give you information about the challenges and updates).**The best way to keep in touch is to join our discord server to exchange with the community and with us 👉🏻 https://discord.gg/ydHrjt3WP5 Prerequisites 🏗️Before diving into the notebook, you need to:🔲 📚 [Study Policy Gradients by reading Unit 4](https://huggingface.co/deep-rl-course/unit4/introduction) Let's code Reinforce algorithm from scratch 🔥To validate this hands-on for the certification process, you need to push your trained models to the Hub.- Get a result of >= 350 for `Cartpole-v1`.- Get a result of >= 5 for `PixelCopter`.To find your result, go to the leaderboard and find your model, **the result = mean_reward - std of reward**. **If you don't see your model on the leaderboard, go at the bottom of the leaderboard page and click on the refresh button**.For more information about the certification process, check this section 👉 https://huggingface.co/deep-rl-course/en/unit0/introductioncertification-process An advice 💡It's better to run this colab in a copy on your Google Drive, so that **if it timeouts** you still have the saved notebook on your Google Drive and do not need to fill everything from scratch.To do that you can either do `Ctrl + S` or `File > Save a copy in Google Drive.` Set the GPU 💪- To **accelerate the agent's training, we'll use a GPU**. 
To do that, go to `Runtime > Change Runtime type` - `Hardware Accelerator > GPU` Create a virtual display 🖥During the notebook, we'll need to generate a replay video. To do so, with colab, **we need to have a virtual screen to be able to render the environment** (and thus record the frames). Hence the following cell will install the libraries and create and run a virtual screen 🖥<jupyter_code>%%capture
!apt install python-opengl
!apt install ffmpeg
!apt install xvfb
!pip install pyvirtualdisplay
!pip install pyglet==1.5.1
# Virtual display
from pyvirtualdisplay import Display
virtual_display = Display(visible=0, size=(1400, 900))
virtual_display.start()<jupyter_output><empty_output><jupyter_text>Install the dependencies 🔽The first step is to install the dependencies. We’ll install multiple ones:- `gym`- `gym-games`: Extra gym environments made with PyGame.- `huggingface_hub`: 🤗 works as a central place where anyone can share and explore models and datasets. It has versioning, metrics, visualizations, and other features that will allow you to easily collaborate with others.You may be wondering why we install gym and not gymnasium, a more recent version of gym? **Because the gym-games we are using are not updated yet with gymnasium**. The differences you'll encounter here:- In `gym` we don't have `terminated` and `truncated` but only `done`.- In `gym` using `env.step()` returns `state, reward, done, info`You can learn more about the differences between Gym and Gymnasium here 👉 https://gymnasium.farama.org/content/migration-guide/You can see here all the Reinforce models available 👉 https://huggingface.co/models?other=reinforceAnd you can find all the Deep Reinforcement Learning models here 👉 https://huggingface.co/models?pipeline_tag=reinforcement-learning<jupyter_code>!pip install -r https://raw.githubusercontent.com/huggingface/deep-rl-class/main/notebooks/unit4/requirements-unit4.txt<jupyter_output><empty_output><jupyter_text>Import the packages 📦In addition to import the installed libraries, we also import:- `imageio`: A library that will help us to generate a replay video<jupyter_code>import numpy as np
from collections import deque
import matplotlib.pyplot as plt
%matplotlib inline
# PyTorch
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical
# Gym
import gym
import gym_pygame
# Hugging Face Hub
from huggingface_hub import notebook_login # To log to our Hugging Face account to be able to upload models to the Hub.
import imageio<jupyter_output><empty_output><jupyter_text>Check if we have a GPU- Let's check if we have a GPU- If it's the case you should see `device:cuda0`<jupyter_code>device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)<jupyter_output><empty_output><jupyter_text>We're now ready to implement our Reinforce algorithm 🔥 First agent: Playing CartPole-v1 🤖 Create the CartPole environment and understand how it works [The environment 🎮](https://www.gymlibrary.dev/environments/classic_control/cart_pole/) Why do we use a simple environment like CartPole-v1?As explained in [Reinforcement Learning Tips and Tricks](https://stable-baselines3.readthedocs.io/en/master/guide/rl_tips.html), when you implement your agent from scratch you need **to be sure that it works correctly and find bugs with easy environments before going deeper**. Since finding bugs will be much easier in simple environments.> Try to have some “sign of life” on toy problems> Validate the implementation by making it run on harder and harder envs (you can compare results against the RL zoo). You usually need to run hyperparameter optimization for that step.___ The CartPole-v1 environment> A pole is attached by an un-actuated joint to a cart, which moves along a frictionless track. The pendulum is placed upright on the cart and the goal is to balance the pole by applying forces in the left and right direction on the cart.So, we start with CartPole-v1. The goal is to push the cart left or right **so that the pole stays in the equilibrium.**The episode ends if:- The pole Angle is greater than ±12°- Cart Position is greater than ±2.4- Episode length is greater than 500We get a reward 💰 of +1 every timestep the Pole stays in the equilibrium.<jupyter_code>env_id = "CartPole-v1"
# Create the env
env = gym.make(env_id)
# Create the evaluation env
eval_env = gym.make(env_id)
# Get the state space and action space
s_size = env.observation_space.shape[0]
a_size = env.action_space.n
print("_____OBSERVATION SPACE_____ \n")
print("The State Space is: ", s_size)
print("Sample observation", env.observation_space.sample()) # Get a random observation
print("\n _____ACTION SPACE_____ \n")
print("The Action Space is: ", a_size)
print("Action Space Sample", env.action_space.sample()) # Take a random action<jupyter_output><empty_output><jupyter_text>Let's build the Reinforce ArchitectureThis implementation is based on two implementations:- [PyTorch official Reinforcement Learning example](https://github.com/pytorch/examples/blob/main/reinforcement_learning/reinforce.py)- [Udacity Reinforce](https://github.com/udacity/deep-reinforcement-learning/blob/master/reinforce/REINFORCE.ipynb)- [Improvement of the integration by Chris1nexus](https://github.com/huggingface/deep-rl-class/pull/95) So we want:- Two fully connected layers (fc1 and fc2).- Using ReLU as activation function of fc1- Using Softmax to output a probability distribution over actions<jupyter_code>class Policy(nn.Module):
def __init__(self, s_size, a_size, h_size):
super(Policy, self).__init__()
# Create two fully connected layers
def forward(self, x):
# Define the forward pass
# state goes to fc1 then we apply ReLU activation function
# fc1 outputs goes to fc2
# We output the softmax
def act(self, state):
"""
Given a state, take action
"""
state = torch.from_numpy(state).float().unsqueeze(0).to(device)
probs = self.forward(state).cpu()
m = Categorical(probs)
action = np.argmax(m)
return action.item(), m.log_prob(action)<jupyter_output><empty_output><jupyter_text>Solution<jupyter_code>class Policy(nn.Module):
def __init__(self, s_size, a_size, h_size):
super(Policy, self).__init__()
self.fc1 = nn.Linear(s_size, h_size)
self.fc2 = nn.Linear(h_size, a_size)
def forward(self, x):
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.softmax(x, dim=1)
def act(self, state):
state = torch.from_numpy(state).float().unsqueeze(0).to(device)
probs = self.forward(state).cpu()
m = Categorical(probs)
action = np.argmax(m)
        return action.item(), m.log_prob(action)<jupyter_output><empty_output><jupyter_text>I made a mistake, can you guess where?- To find out, let's make a forward pass:<jupyter_code>debug_policy = Policy(s_size, a_size, 64).to(device)
debug_policy.act(env.reset())<jupyter_output><empty_output><jupyter_text>- Here we see that the error says `ValueError: The value argument to log_prob must be a Tensor`- It means that `action` in `m.log_prob(action)` must be a Tensor **but it's not.**- Do you know why? Check the act function and try to see why it does not work. Advice 💡: Something is wrong in this implementation. Remember that in the `act` function **we want to sample an action from the probability distribution over actions**. (Real) Solution<jupyter_code>class Policy(nn.Module):
def __init__(self, s_size, a_size, h_size):
super(Policy, self).__init__()
self.fc1 = nn.Linear(s_size, h_size)
self.fc2 = nn.Linear(h_size, a_size)
def forward(self, x):
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.softmax(x, dim=1)
def act(self, state):
state = torch.from_numpy(state).float().unsqueeze(0).to(device)
probs = self.forward(state).cpu()
m = Categorical(probs)
action = m.sample()
        return action.item(), m.log_prob(action)<jupyter_output><empty_output><jupyter_text>By using CartPole, it was easier to debug since **we know that the bug comes from our integration and not from our simple environment**. - Since **we want to sample an action from the probability distribution over actions**, we can't use `action = np.argmax(m)` since it will always output the action that has the highest probability.- We need to replace it with `action = m.sample()`, which will sample an action from the probability distribution P(.|s) Let's build the Reinforce Training AlgorithmThis is the Reinforce algorithm pseudocode: - When we calculate the return Gt (line 6) we see that we calculate the sum of discounted rewards **starting at timestep t**.- Why? Because our policy should only **reinforce actions on the basis of their consequences**: rewards obtained before taking an action are useless (since they were not caused by that action), **only the ones that come after the action matter**.- Before coding this you should read the section [don't let the past distract you](https://spinningup.openai.com/en/latest/spinningup/rl_intro3.html#don-t-let-the-past-distract-you) that explains why we use the reward-to-go policy gradient.We use an interesting technique coded by [Chris1nexus](https://github.com/Chris1nexus) to **compute the return at each timestep efficiently**. The comments explain the procedure. Don't hesitate also [to check the PR explanation](https://github.com/huggingface/deep-rl-class/pull/95)But overall the idea is to **compute the return at each timestep efficiently**. The second question you may ask is **why do we minimize the loss**? Didn't we talk about Gradient Ascent, not Gradient Descent?- We want to maximize our utility function $J(\theta)$, but in PyTorch, as in Tensorflow, it's better to **minimize an objective function.** - So let's say we want to reinforce action 3 at a certain timestep. Before training, the probability of this action is 0.25. - So we want to modify $\theta$ such that $\pi_\theta(a_3|s; \theta) > 0.25$ - Because all probabilities must sum to 1, maximizing $\pi_\theta(a_3|s; \theta)$ will **minimize the other actions' probabilities.** - So we should tell PyTorch **to minimize $1 - \pi_\theta(a_3|s; \theta)$.** - This loss function approaches 0 as $\pi_\theta(a_3|s; \theta)$ nears 1. - So we are encouraging the gradient to maximize $\pi_\theta(a_3|s; \theta)$<jupyter_code>def reinforce(policy, optimizer, n_training_episodes, max_t, gamma, print_every):
# Help us to calculate the score during the training
scores_deque = deque(maxlen=100)
scores = []
# Line 3 of pseudocode
for i_episode in range(1, n_training_episodes+1):
saved_log_probs = []
rewards = []
state = # TODO: reset the environment
# Line 4 of pseudocode
for t in range(max_t):
action, log_prob = # TODO get the action
saved_log_probs.append(log_prob)
state, reward, done, _ = # TODO: take an env step
rewards.append(reward)
if done:
break
scores_deque.append(sum(rewards))
scores.append(sum(rewards))
# Line 6 of pseudocode: calculate the return
returns = deque(maxlen=max_t)
n_steps = len(rewards)
# Compute the discounted returns at each timestep,
# as the sum of the gamma-discounted return at time t (G_t) + the reward at time t
# In O(N) time, where N is the number of time steps
# (this definition of the discounted return G_t follows the definition of this quantity
# shown at page 44 of Sutton&Barto 2017 2nd draft)
# G_t = r_(t+1) + r_(t+2) + ...
# Given this formulation, the returns at each timestep t can be computed
# by re-using the computed future returns G_(t+1) to compute the current return G_t
# G_t = r_(t+1) + gamma*G_(t+1)
# G_(t-1) = r_t + gamma* G_t
# (this follows a dynamic programming approach, with which we memorize solutions in order
# to avoid computing them multiple times)
# This is correct since the above is equivalent to (see also page 46 of Sutton&Barto 2017 2nd draft)
# G_(t-1) = r_t + gamma*r_(t+1) + gamma*gamma*r_(t+2) + ...
## Given the above, we calculate the returns at timestep t as:
# gamma[t] * return[t] + reward[t]
#
## We compute this starting from the last timestep to the first, in order
## to employ the formula presented above and avoid redundant computations that would be needed
## if we were to do it from first to last.
## Hence, the queue "returns" will hold the returns in chronological order, from t=0 to t=n_steps
## thanks to the appendleft() function which allows to append to the position 0 in constant time O(1)
## a normal python list would instead require O(N) to do this.
for t in range(n_steps)[::-1]:
disc_return_t = (returns[0] if len(returns)>0 else 0)
returns.appendleft( ) # TODO: complete here
## standardization of the returns is employed to make training more stable
eps = np.finfo(np.float32).eps.item()
## eps is the smallest representable float, which is
# added to the standard deviation of the returns to avoid numerical instabilities
returns = torch.tensor(returns)
returns = (returns - returns.mean()) / (returns.std() + eps)
# Line 7:
policy_loss = []
for log_prob, disc_return in zip(saved_log_probs, returns):
policy_loss.append(-log_prob * disc_return)
policy_loss = torch.cat(policy_loss).sum()
# Line 8: PyTorch prefers gradient descent
optimizer.zero_grad()
policy_loss.backward()
optimizer.step()
if i_episode % print_every == 0:
print('Episode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque)))
return scores<jupyter_output><empty_output><jupyter_text>Solution<jupyter_code>def reinforce(policy, optimizer, n_training_episodes, max_t, gamma, print_every):
# Help us to calculate the score during the training
scores_deque = deque(maxlen=100)
scores = []
# Line 3 of pseudocode
for i_episode in range(1, n_training_episodes+1):
saved_log_probs = []
rewards = []
state = env.reset()
# Line 4 of pseudocode
for t in range(max_t):
action, log_prob = policy.act(state)
saved_log_probs.append(log_prob)
state, reward, done, _ = env.step(action)
rewards.append(reward)
if done:
break
scores_deque.append(sum(rewards))
scores.append(sum(rewards))
# Line 6 of pseudocode: calculate the return
returns = deque(maxlen=max_t)
n_steps = len(rewards)
# Compute the discounted returns at each timestep,
# as
# the sum of the gamma-discounted return at time t (G_t) + the reward at time t
#
# In O(N) time, where N is the number of time steps
# (this definition of the discounted return G_t follows the definition of this quantity
# shown at page 44 of Sutton&Barto 2017 2nd draft)
# G_t = r_(t+1) + r_(t+2) + ...
# Given this formulation, the returns at each timestep t can be computed
# by re-using the computed future returns G_(t+1) to compute the current return G_t
# G_t = r_(t+1) + gamma*G_(t+1)
# G_(t-1) = r_t + gamma* G_t
# (this follows a dynamic programming approach, with which we memorize solutions in order
# to avoid computing them multiple times)
# This is correct since the above is equivalent to (see also page 46 of Sutton&Barto 2017 2nd draft)
# G_(t-1) = r_t + gamma*r_(t+1) + gamma*gamma*r_(t+2) + ...
## Given the above, we calculate the returns at timestep t as:
# gamma[t] * return[t] + reward[t]
#
## We compute this starting from the last timestep to the first, in order
## to employ the formula presented above and avoid redundant computations that would be needed
## if we were to do it from first to last.
## Hence, the queue "returns" will hold the returns in chronological order, from t=0 to t=n_steps
## thanks to the appendleft() function which allows to append to the position 0 in constant time O(1)
## a normal python list would instead require O(N) to do this.
for t in range(n_steps)[::-1]:
disc_return_t = (returns[0] if len(returns)>0 else 0)
returns.appendleft( gamma*disc_return_t + rewards[t] )
## standardization of the returns is employed to make training more stable
eps = np.finfo(np.float32).eps.item()
## eps is the smallest representable float, which is
# added to the standard deviation of the returns to avoid numerical instabilities
returns = torch.tensor(returns)
returns = (returns - returns.mean()) / (returns.std() + eps)
# Line 7:
policy_loss = []
for log_prob, disc_return in zip(saved_log_probs, returns):
policy_loss.append(-log_prob * disc_return)
policy_loss = torch.cat(policy_loss).sum()
# Line 8: PyTorch prefers gradient descent
optimizer.zero_grad()
policy_loss.backward()
optimizer.step()
if i_episode % print_every == 0:
print('Episode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque)))
return scores<jupyter_output><empty_output><jupyter_text>Train it- We're now ready to train our agent.- But first, we define a variable containing all the training hyperparameters.- You can change the training parameters (and should 😉)<jupyter_code>cartpole_hyperparameters = {
"h_size": 16,
"n_training_episodes": 1000,
"n_evaluation_episodes": 10,
"max_t": 1000,
"gamma": 1.0,
"lr": 1e-2,
"env_id": env_id,
"state_space": s_size,
"action_space": a_size,
}
# Create policy and place it to the device
cartpole_policy = Policy(cartpole_hyperparameters["state_space"], cartpole_hyperparameters["action_space"], cartpole_hyperparameters["h_size"]).to(device)
cartpole_optimizer = optim.Adam(cartpole_policy.parameters(), lr=cartpole_hyperparameters["lr"])
scores = reinforce(cartpole_policy,
cartpole_optimizer,
cartpole_hyperparameters["n_training_episodes"],
cartpole_hyperparameters["max_t"],
cartpole_hyperparameters["gamma"],
100)<jupyter_output><empty_output><jupyter_text>Define evaluation method 📝- Here we define the evaluation method that we're going to use to test our Reinforce agent.<jupyter_code>def evaluate_agent(env, max_steps, n_eval_episodes, policy):
"""
    Evaluate the agent for ``n_eval_episodes`` episodes and return the average reward and the standard deviation of the reward.
:param env: The evaluation environment
    :param n_eval_episodes: Number of episodes to evaluate the agent
:param policy: The Reinforce agent
"""
episode_rewards = []
for episode in range(n_eval_episodes):
state = env.reset()
step = 0
done = False
total_rewards_ep = 0
for step in range(max_steps):
action, _ = policy.act(state)
new_state, reward, done, info = env.step(action)
total_rewards_ep += reward
if done:
break
state = new_state
episode_rewards.append(total_rewards_ep)
mean_reward = np.mean(episode_rewards)
std_reward = np.std(episode_rewards)
return mean_reward, std_reward<jupyter_output><empty_output><jupyter_text>Evaluate our agent 📈<jupyter_code>evaluate_agent(eval_env,
cartpole_hyperparameters["max_t"],
cartpole_hyperparameters["n_evaluation_episodes"],
cartpole_policy)<jupyter_output><empty_output><jupyter_text>Publish our trained model on the Hub 🔥Now that we saw we got good results after the training, we can publish our trained model on the hub 🤗 with one line of code.Here's an example of a Model Card: Push to the Hub Do not modify this code<jupyter_code>from huggingface_hub import HfApi, snapshot_download
from huggingface_hub.repocard import metadata_eval_result, metadata_save
from pathlib import Path
import datetime
import json
import imageio
import tempfile
import os
def record_video(env, policy, out_directory, fps=30):
"""
Generate a replay video of the agent
:param env
    :param policy: the Reinforce policy used to select actions
:param out_directory
    :param fps: how many frames per second (with taxi-v3 and frozenlake-v1 we use 1)
"""
images = []
done = False
state = env.reset()
img = env.render(mode='rgb_array')
images.append(img)
while not done:
        # Sample the action to take from the policy, given the current state
action, _ = policy.act(state)
state, reward, done, info = env.step(action) # We directly put next_state = state for recording logic
img = env.render(mode='rgb_array')
images.append(img)
imageio.mimsave(out_directory, [np.array(img) for i, img in enumerate(images)], fps=fps)
def push_to_hub(repo_id,
model,
hyperparameters,
eval_env,
video_fps=30
):
"""
Evaluate, Generate a video and Upload a model to Hugging Face Hub.
This method does the complete pipeline:
- It evaluates the model
- It generates the model card
- It generates a replay video of the agent
- It pushes everything to the Hub
:param repo_id: repo_id: id of the model repository from the Hugging Face Hub
:param model: the pytorch model we want to save
:param hyperparameters: training hyperparameters
:param eval_env: evaluation environment
    :param video_fps: how many frames per second to record our video replay
"""
_, repo_name = repo_id.split("/")
api = HfApi()
# Step 1: Create the repo
repo_url = api.create_repo(
repo_id=repo_id,
exist_ok=True,
)
with tempfile.TemporaryDirectory() as tmpdirname:
local_directory = Path(tmpdirname)
# Step 2: Save the model
torch.save(model, local_directory / "model.pt")
# Step 3: Save the hyperparameters to JSON
with open(local_directory / "hyperparameters.json", "w") as outfile:
json.dump(hyperparameters, outfile)
# Step 4: Evaluate the model and build JSON
mean_reward, std_reward = evaluate_agent(eval_env,
hyperparameters["max_t"],
hyperparameters["n_evaluation_episodes"],
model)
# Get datetime
eval_datetime = datetime.datetime.now()
eval_form_datetime = eval_datetime.isoformat()
evaluate_data = {
"env_id": hyperparameters["env_id"],
"mean_reward": mean_reward,
"n_evaluation_episodes": hyperparameters["n_evaluation_episodes"],
"eval_datetime": eval_form_datetime,
}
# Write a JSON file
with open(local_directory / "results.json", "w") as outfile:
json.dump(evaluate_data, outfile)
# Step 5: Create the model card
env_name = hyperparameters["env_id"]
metadata = {}
metadata["tags"] = [
env_name,
"reinforce",
"reinforcement-learning",
"custom-implementation",
"deep-rl-class"
]
# Add metrics
eval = metadata_eval_result(
model_pretty_name=repo_name,
task_pretty_name="reinforcement-learning",
task_id="reinforcement-learning",
metrics_pretty_name="mean_reward",
metrics_id="mean_reward",
metrics_value=f"{mean_reward:.2f} +/- {std_reward:.2f}",
dataset_pretty_name=env_name,
dataset_id=env_name,
)
# Merges both dictionaries
metadata = {**metadata, **eval}
model_card = f"""
# **Reinforce** Agent playing **{env_id}**
This is a trained model of a **Reinforce** agent playing **{env_id}** .
To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
"""
readme_path = local_directory / "README.md"
readme = ""
if readme_path.exists():
with readme_path.open("r", encoding="utf8") as f:
readme = f.read()
else:
readme = model_card
with readme_path.open("w", encoding="utf-8") as f:
f.write(readme)
# Save our metrics to Readme metadata
metadata_save(readme_path, metadata)
# Step 6: Record a video
video_path = local_directory / "replay.mp4"
record_video(env, model, video_path, video_fps)
# Step 7. Push everything to the Hub
api.upload_folder(
repo_id=repo_id,
folder_path=local_directory,
path_in_repo=".",
)
print(f"Your model is pushed to the Hub. You can view your model here: {repo_url}")<jupyter_output><empty_output><jupyter_text>.By using `push_to_hub` **you evaluate, record a replay, generate a model card of your agent and push it to the Hub**.This way:- You can **showcase our work** 🔥- You can **visualize your agent playing** 👀- You can **share with the community an agent that others can use** 💾- You can **access a leaderboard 🏆 to see how well your agent is performing compared to your classmates** 👉 https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard To be able to share your model with the community there are three more steps to follow:1️⃣ (If it's not already done) create an account to HF ➡ https://huggingface.co/join2️⃣ Sign in and then, you need to store your authentication token from the Hugging Face website.- Create a new token (https://huggingface.co/settings/tokens) **with write role**<jupyter_code>notebook_login()<jupyter_output><empty_output><jupyter_text>If you don't want to use a Google Colab or a Jupyter Notebook, you need to use this command instead: `huggingface-cli login` (or `login`) 3️⃣ We're now ready to push our trained agent to the 🤗 Hub 🔥 using `package_to_hub()` function<jupyter_code>repo_id = "" #TODO Define your repo id {username/Reinforce-{model-id}}
push_to_hub(repo_id,
cartpole_policy, # The model we want to save
cartpole_hyperparameters, # Hyperparameters
eval_env, # Evaluation environment
video_fps=30
                )<jupyter_output><empty_output><jupyter_text>Now that we've tested the robustness of our implementation, let's try a more complex environment: PixelCopter 🚁 Second agent: PixelCopter 🚁 Study the PixelCopter environment 👀- [The Environment documentation](https://pygame-learning-environment.readthedocs.io/en/latest/user/games/pixelcopter.html)<jupyter_code>env_id = "Pixelcopter-PLE-v0"
env = gym.make(env_id)
eval_env = gym.make(env_id)
s_size = env.observation_space.shape[0]
a_size = env.action_space.n
print("_____OBSERVATION SPACE_____ \n")
print("The State Space is: ", s_size)
print("Sample observation", env.observation_space.sample()) # Get a random observation
print("\n _____ACTION SPACE_____ \n")
print("The Action Space is: ", a_size)
print("Action Space Sample", env.action_space.sample()) # Take a random action<jupyter_output><empty_output><jupyter_text>The observation space (7) 👀:- player y position- player velocity- player distance to floor- player distance to ceiling- next block x distance to player- next blocks top y location- next blocks bottom y locationThe action space(2) 🎮:- Up (press accelerator) - Do nothing (don't press accelerator) The reward function 💰: - For each vertical block it passes through it gains a positive reward of +1. Each time a terminal state reached it receives a negative reward of -1. Define the new Policy 🧠- We need to have a deeper neural network since the environment is more complex<jupyter_code>class Policy(nn.Module):
def __init__(self, s_size, a_size, h_size):
super(Policy, self).__init__()
# Define the three layers here
def forward(self, x):
# Define the forward process here
return F.softmax(x, dim=1)
def act(self, state):
state = torch.from_numpy(state).float().unsqueeze(0).to(device)
probs = self.forward(state).cpu()
m = Categorical(probs)
action = m.sample()
return action.item(), m.log_prob(action)<jupyter_output><empty_output><jupyter_text>Solution<jupyter_code>class Policy(nn.Module):
def __init__(self, s_size, a_size, h_size):
super(Policy, self).__init__()
self.fc1 = nn.Linear(s_size, h_size)
self.fc2 = nn.Linear(h_size, h_size*2)
self.fc3 = nn.Linear(h_size*2, a_size)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return F.softmax(x, dim=1)
def act(self, state):
state = torch.from_numpy(state).float().unsqueeze(0).to(device)
probs = self.forward(state).cpu()
m = Categorical(probs)
action = m.sample()
return action.item(), m.log_prob(action)<jupyter_output><empty_output><jupyter_text>Define the hyperparameters ⚙️- Because this environment is more complex.- Especially for the hidden size, we need more neurons.<jupyter_code>pixelcopter_hyperparameters = {
"h_size": 64,
"n_training_episodes": 50000,
"n_evaluation_episodes": 10,
"max_t": 10000,
"gamma": 0.99,
"lr": 1e-4,
"env_id": env_id,
"state_space": s_size,
"action_space": a_size,
}<jupyter_output><empty_output><jupyter_text>Train it- We're now ready to train our agent 🔥.<jupyter_code># Create policy and place it to the device
# torch.manual_seed(50)
pixelcopter_policy = Policy(pixelcopter_hyperparameters["state_space"], pixelcopter_hyperparameters["action_space"], pixelcopter_hyperparameters["h_size"]).to(device)
pixelcopter_optimizer = optim.Adam(pixelcopter_policy.parameters(), lr=pixelcopter_hyperparameters["lr"])
scores = reinforce(pixelcopter_policy,
pixelcopter_optimizer,
pixelcopter_hyperparameters["n_training_episodes"],
pixelcopter_hyperparameters["max_t"],
pixelcopter_hyperparameters["gamma"],
1000)<jupyter_output><empty_output><jupyter_text>Publish our trained model on the Hub 🔥<jupyter_code>repo_id = "" #TODO Define your repo id {username/Reinforce-{model-id}}
push_to_hub(repo_id,
pixelcopter_policy, # The model we want to save
pixelcopter_hyperparameters, # Hyperparameters
eval_env, # Evaluation environment
video_fps=30
)<jupyter_output><empty_output> | deep-rl-class/notebooks/unit4/unit4.ipynb/0 | {
"file_path": "deep-rl-class/notebooks/unit4/unit4.ipynb",
"repo_id": "deep-rl-class",
"token_count": 12740
} | 86 |
# The Exploration/Exploitation trade-off [[exp-exp-tradeoff]]
Finally, before looking at the different methods to solve Reinforcement Learning problems, we must cover one more very important topic: *the exploration/exploitation trade-off.*
- *Exploration* is exploring the environment by trying random actions in order to **find more information about the environment.**
- *Exploitation* is **exploiting known information to maximize the reward.**
Remember, the goal of our RL agent is to maximize the expected cumulative reward. However, **we can fall into a common trap**.
Let’s take an example:
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/exp_1.jpg" alt="Exploration" width="100%">
In this game, our mouse can have an **infinite amount of small cheese** (+1 each). But at the top of the maze, there is a gigantic sum of cheese (+1000).
However, if we only focus on exploitation, our agent will never reach the gigantic sum of cheese. Instead, it will only exploit **the nearest source of rewards,** even if this source is small (exploitation).
But if our agent does a little bit of exploration, it can **discover the big reward** (the pile of big cheese).
This is what we call the exploration/exploitation trade-off. We need to balance how much we **explore the environment** and how much we **exploit what we know about the environment.**
Therefore, we must **define a rule that helps to handle this trade-off**. We’ll see the different ways to handle it in the future units.
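As a preview of one common rule (the next units cover these strategies in detail), here is a minimal, hypothetical epsilon-greedy sketch in Python: with a small probability we explore by taking a random action, otherwise we exploit the action with the highest estimated value. The `q_values` list is an assumed placeholder for the agent's current value estimates, not something defined in this unit.
```python
import random

def epsilon_greedy(q_values, epsilon=0.1):
    """Choose an action from a list of estimated action values."""
    if random.random() < epsilon:
        # Exploration: try a random action to gather more information about the environment
        return random.randrange(len(q_values))
    # Exploitation: use what we already know and take the best estimated action
    return max(range(len(q_values)), key=lambda action: q_values[action])
```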
If it’s still confusing, **think of a real problem: the choice of picking a restaurant:**
<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/exp_2.jpg" alt="Exploration">
<figcaption>Source: <a href="https://inst.eecs.berkeley.edu/~cs188/sp20/assets/lecture/lec15_6up.pdf"> Berkley AI Course</a>
</figcaption>
</figure>
- *Exploitation*: You go to the same one that you know is good every day and **take the risk of missing another, better restaurant.**
- *Exploration*: Try restaurants you've never been to before, with the risk of having a bad experience **but the possible opportunity of a fantastic experience.**
To recap:
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/expexpltradeoff.jpg" alt="Exploration Exploitation Tradeoff" width="100%">
| deep-rl-class/units/en/unit1/exp-exp-tradeoff.mdx/0 | {
"file_path": "deep-rl-class/units/en/unit1/exp-exp-tradeoff.mdx",
"repo_id": "deep-rl-class",
"token_count": 699
} | 87 |
# Monte Carlo vs Temporal Difference Learning [[mc-vs-td]]
The last thing we need to discuss before diving into Q-Learning is the two learning strategies.
Remember that an RL agent **learns by interacting with its environment.** The idea is that **given the experience and the received reward, the agent will update its value function or policy.**
Monte Carlo and Temporal Difference Learning are two different **strategies on how to train our value function or our policy function.** Both of them **use experience to solve the RL problem.**
On one hand, Monte Carlo uses **an entire episode of experience before learning.** On the other hand, Temporal Difference uses **only a step ( \\(S_t, A_t, R_{t+1}, S_{t+1}\\) ) to learn.**
We'll explain both of them **using a value-based method example.**
## Monte Carlo: learning at the end of the episode [[monte-carlo]]
Monte Carlo waits until the end of the episode, calculates \\(G_t\\) (return) and uses it as **a target for updating \\(V(S_t)\\).**
So it requires a **complete episode of interaction before updating our value function.**
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/monte-carlo-approach.jpg" alt="Monte Carlo"/>
If we take an example:
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/MC-2.jpg" alt="Monte Carlo"/>
- We always start the episode **at the same starting point.**
- **The agent takes actions using the policy**. For instance, using an Epsilon Greedy Strategy, a policy that alternates between exploration (random actions) and exploitation.
- We get **the reward and the next state.**
- We terminate the episode if the cat eats the mouse or if the mouse moves > 10 steps.
- At the end of the episode, **we have a list of State, Actions, Rewards, and Next States tuples**
For instance [[State tile 3 bottom, Go Left, +1, State tile 2 bottom], [State tile 2 bottom, Go Left, +0, State tile 1 bottom]...]
- **The agent will sum the total rewards \\(G_t\\)** (to see how well it did).
- It will then **update \\(V(s_t)\\) based on the formula**
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/MC-3.jpg" alt="Monte Carlo"/>
- Then **start a new game with this new knowledge**
By running more and more episodes, **the agent will learn to play better and better.**
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/MC-3p.jpg" alt="Monte Carlo"/>
For instance, if we train a state-value function using Monte Carlo:
- We initialize our value function **so that it returns 0 value for each state**
- Our learning rate (lr) is 0.1 and our discount rate is 1 (= no discount)
- Our mouse **explores the environment and takes random actions**
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/MC-4.jpg" alt="Monte Carlo"/>
- The mouse took more than 10 steps, so the episode ends.
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/MC-4p.jpg" alt="Monte Carlo"/>
- We have a list of states, actions, rewards, and next states, **and we need to calculate the return \\(G_{t=0}\\)**
\\(G_t = R_{t+1} + R_{t+2} + R_{t+3} ...\\) (for simplicity, we don't discount the rewards)
\\(G_0 = R_{1} + R_{2} + R_{3}…\\)
\\(G_0 = 1 + 0 + 0 + 0 + 0 + 0 + 1 + 1 + 0 + 0\\)
\\(G_0 = 3\\)
- We can now compute the **new** \\(V(S_0)\\):
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/MC-5.jpg" alt="Monte Carlo"/>
\\(V(S_0) = V(S_0) + lr * [G_0 - V(S_0)]\\)
\\(V(S_0) = 0 + 0.1 * [3 - 0]\\)
\\(V(S_0) = 0.3\\)
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/MC-5p.jpg" alt="Monte Carlo"/>
## Temporal Difference Learning: learning at each step [[td-learning]]
**Temporal Difference, on the other hand, waits for only one interaction (one step) \\(S_{t+1}\\)** to form a TD target and update \\(V(S_t)\\) using \\(R_{t+1}\\) and \\( \gamma * V(S_{t+1})\\).
The idea with **TD is to update the \\(V(S_t)\\) at each step.**
But because we didn't experience an entire episode, we don't have \\(G_t\\) (expected return). Instead, **we estimate \\(G_t\\) by adding \\(R_{t+1}\\) and the discounted value of the next state.**
This is called bootstrapping. It's called this **because TD bases its update in part on an existing estimate \\(V(S_{t+1})\\) and not a complete sample \\(G_t\\).**
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/TD-1.jpg" alt="Temporal Difference"/>
This method is called TD(0) or **one-step TD (update the value function after any individual step).**
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/TD-1p.jpg" alt="Temporal Difference"/>
If we take the same example,
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/TD-2.jpg" alt="Temporal Difference"/>
- We initialize our value function so that it returns 0 value for each state.
- Our learning rate (lr) is 0.1, and our discount rate is 1 (no discount).
- Our mouse begins to explore the environment and takes a random action: **going to the left**
- It gets a reward \\(R_{t+1} = 1\\) since **it eats a piece of cheese**
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/TD-2p.jpg" alt="Temporal Difference"/>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/TD-3.jpg" alt="Temporal Difference"/>
We can now update \\(V(S_0)\\):
New \\(V(S_0) = V(S_0) + lr * [R_1 + \gamma * V(S_1) - V(S_0)]\\)
New \\(V(S_0) = 0 + 0.1 * [1 + 1 * 0 - 0]\\)
New \\(V(S_0) = 0.1\\)
So we just updated our value function for State 0.
Now we **continue to interact with this environment with our updated value function.**
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/TD-3p.jpg" alt="Temporal Difference"/>
To summarize:
- With *Monte Carlo*, we update the value function from a complete episode, and so we **use the actual accurate discounted return of this episode.**
- With *TD Learning*, we update the value function from a step, and we replace \\(G_t\\), which we don't know, with **an estimated return called the TD target.**
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Summary.jpg" alt="Summary"/>
| deep-rl-class/units/en/unit2/mc-vs-td.mdx/0 | {
"file_path": "deep-rl-class/units/en/unit2/mc-vs-td.mdx",
"repo_id": "deep-rl-class",
"token_count": 2316
} | 88 |
# Deep Q-Learning [[deep-q-learning]]
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/thumbnail.jpg" alt="Unit 3 thumbnail" width="100%">
In the last unit, we learned our first reinforcement learning algorithm: Q-Learning, **implemented it from scratch**, and trained it in two environments, FrozenLake-v1 ☃️ and Taxi-v3 🚕.
We got excellent results with this simple algorithm, but these environments were relatively simple because the **state space was discrete and small** (16 different states for FrozenLake-v1 and 500 for Taxi-v3). For comparison, the state space in Atari games can **contain \\(10^{9}\\) to \\(10^{11}\\) states**.
But as we'll see, producing and updating a **Q-table can become ineffective in large state space environments.**
So in this unit, **we'll study our first Deep Reinforcement Learning agent**: Deep Q-Learning. Instead of using a Q-table, Deep Q-Learning uses a Neural Network that takes a state and approximates Q-values for each action based on that state.
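To make this idea concrete, here is a minimal PyTorch sketch of such a network (an illustration only, not the exact architecture used later in this unit; the state size, number of actions and hidden size are hypothetical): it takes a state vector and outputs one estimated Q-value per action.
```python
import torch
import torch.nn as nn

class QNetwork(nn.Module):
    def __init__(self, state_size, n_actions, hidden_size=64):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(state_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, n_actions),  # one Q-value per action
        )

    def forward(self, state):
        return self.net(state)

# Greedy action selection: pick the action with the highest estimated Q-value for this state
q_net = QNetwork(state_size=4, n_actions=2)
state = torch.rand(1, 4)  # a dummy state, just for illustration
action = q_net(state).argmax(dim=1).item()
```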
And **we'll train it to play Space Invaders and other Atari environments using [RL-Zoo](https://github.com/DLR-RM/rl-baselines3-zoo)**, a training framework for RL using Stable-Baselines that provides scripts for training, evaluating agents, tuning hyperparameters, plotting results, and recording videos.
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/atari-envs.gif" alt="Environments"/>
So let’s get started! 🚀
| deep-rl-class/units/en/unit3/introduction.mdx/0 | {
"file_path": "deep-rl-class/units/en/unit3/introduction.mdx",
"repo_id": "deep-rl-class",
"token_count": 437
} | 89 |
# How do Unity ML-Agents work? [[how-mlagents-works]]
Before training our agent, we need to understand **what ML-Agents is and how it works**.
## What is Unity ML-Agents? [[what-is-mlagents]]
[Unity ML-Agents](https://github.com/Unity-Technologies/ml-agents) is a toolkit for the game engine Unity that **allows us to create environments using Unity or use pre-made environments to train our agents**.
It’s developed by [Unity Technologies](https://unity.com/), the developers of Unity, one of the most famous Game Engines used by the creators of Firewatch, Cuphead, and Cities: Skylines.
<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit5/firewatch.jpeg" alt="Firewatch"/>
<figcaption>Firewatch was made with Unity</figcaption>
</figure>
## The six components [[six-components]]
With Unity ML-Agents, you have six essential components:
<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit5/mlagents-1.png" alt="MLAgents"/>
<figcaption>Source: <a href="https://unity-technologies.github.io/ml-agents/">Unity ML-Agents Documentation</a> </figcaption>
</figure>
- The first is the *Learning Environment*, which contains **the Unity scene (the environment) and the environment elements** (game characters).
- The second is the *Python Low-level API*, which contains **the low-level Python interface for interacting with and manipulating the environment**. It’s the API we use to launch the training.
- Then, we have the *External Communicator* that **connects the Learning Environment (made with C#) with the low-level Python API (Python)**.
- The *Python trainers*: the **Reinforcement Learning algorithms made with PyTorch (PPO, SAC…)**.
- The *Gym wrapper*: to encapsulate the RL environment in a gym wrapper.
- The *PettingZoo wrapper*: PettingZoo is the multi-agents version of the gym wrapper.
## Inside the Learning Component [[inside-learning-component]]
Inside the Learning Component, we have **two important elements**:
- The first is the *agent component*, the actor of the scene. We’ll **train the agent by optimizing its policy** (which will tell us what action to take in each state). The policy is called the *Brain*.
- Finally, there is the *Academy*. This component **orchestrates agents and their decision-making processes**. Think of this Academy as a teacher who handles Python API requests.
To better understand its role, let’s remember the RL process. This can be modeled as a loop that works like this:
<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/RL_process.jpg" alt="The RL process" width="100%">
<figcaption>The RL Process: a loop of state, action, reward and next state</figcaption>
<figcaption>Source: <a href="http://incompleteideas.net/book/RLbook2020.pdf">Reinforcement Learning: An Introduction, Richard Sutton and Andrew G. Barto</a></figcaption>
</figure>
Now, let’s imagine an agent learning to play a platform game. The RL process looks like this:
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/RL_process_game.jpg" alt="The RL process" width="100%">
- Our Agent receives **state \\(S_0\\)** from the **Environment** — we receive the first frame of our game (Environment).
- Based on that **state \\(S_0\\),** the Agent takes **action \\(A_0\\)** — our Agent will move to the right.
- The environment goes to a **new** **state \\(S_1\\)** — new frame.
- The environment gives some **reward \\(R_1\\)** to the Agent — we’re not dead *(Positive Reward +1)*.
This RL loop outputs a sequence of **state, action, reward and next state.** The goal of the agent is to **maximize the expected cumulative reward**.
The Academy will be the one that will **send the order to our Agents and ensure that agents are in sync**:
- Collect Observations
- Select your action using your policy
- Take the Action
- Reset if you reached the max step or if you’re done.
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit5/academy.png" alt="The MLAgents Academy" width="100%">
Now that we understand how ML-Agents works, **we’re ready to train our agents.**
| deep-rl-class/units/en/unit5/how-mlagents-works.mdx/0 | {
"file_path": "deep-rl-class/units/en/unit5/how-mlagents-works.mdx",
"repo_id": "deep-rl-class",
"token_count": 1276
} | 90 |
# Introduction [[introduction]]
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit0/thumbnail.png" alt="Thumbnail"/>
Since the beginning of this course, we learned to train agents in a *single-agent system* where our agent was alone in its environment: it was **not cooperating or collaborating with other agents**.
This worked great, and the single-agent system is useful for many applications.
<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/patchwork.jpg" alt="Patchwork"/>
<figcaption>
A patchwork of all the environments you’ve trained your agents on since the beginning of the course
</figcaption>
</figure>
But, as humans, **we live in a multi-agent world**. Our intelligence comes from interaction with other agents. And so, our **goal is to create agents that can interact with other humans and other agents**.
Consequently, we must study how to train deep reinforcement learning agents in a *multi-agent system* to build robust agents that can adapt, collaborate, or compete.
So today we’re going to **learn the basics of the fascinating topic of multi-agent reinforcement learning (MARL)**.
And the most exciting part is that, during this unit, you’re going to train your first agents in a multi-agent system: **a 2vs2 soccer team that needs to beat the opponent team**.
And you’re going to participate in **AI vs. AI challenge** where your trained agent will compete against other classmates’ agents every day and be ranked on a [new leaderboard](https://huggingface.co/spaces/huggingface-projects/AIvsAI-SoccerTwos).
<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/soccertwos.gif" alt="SoccerTwos"/>
<figcaption>This environment was made by the <a href="https://github.com/Unity-Technologies/ml-agents">Unity MLAgents Team</a></figcaption>
</figure>
So let’s get started!
| deep-rl-class/units/en/unit7/introduction.mdx/0 | {
"file_path": "deep-rl-class/units/en/unit7/introduction.mdx",
"repo_id": "deep-rl-class",
"token_count": 574
} | 91 |
# Introduction [[introduction]]
In this bonus unit, we'll reinforce what we learned in the first unit by teaching Huggy the Dog to fetch the stick and then [play with him directly in your browser](https://huggingface.co/spaces/ThomasSimonini/Huggy) 🐶
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit2/thumbnail.png" alt="Unit bonus 1 thumbnail" width="100%">
So let's get started 🚀
| deep-rl-class/units/en/unitbonus1/introduction.mdx/0 | {
"file_path": "deep-rl-class/units/en/unitbonus1/introduction.mdx",
"repo_id": "deep-rl-class",
"token_count": 138
} | 92 |
# Brief introduction to RL documentation
In this advanced topic, we address the question: **how should we monitor and keep track of powerful reinforcement learning agents that we are training in the real world and
interfacing with humans?**
As machine learning systems have increasingly impacted modern life, the **call for the documentation of these systems has grown**.
Such documentation can cover aspects such as the training data used — where it is stored, when it was collected, who was involved, etc.
— or the model optimization framework — the architecture, evaluation metrics, relevant papers, etc. — and more.
Today, model cards and datasheets are becoming increasingly available, for example on the Hub (see the documentation [here](https://huggingface.co/docs/hub/model-cards)).
If you click on a [popular model on the Hub](https://huggingface.co/models), you can learn about its creation process.
These model- and data-specific logs are designed to be completed when the model or dataset is created, but they often go un-updated once those models are built into evolving systems later on.
## Motivating Reward Reports
Reinforcement learning systems are fundamentally designed to optimize based on measurements of reward and time.
While the notion of a reward function can be mapped nicely to many well-understood fields of supervised learning (via a loss function),
understanding of how machine learning systems evolve over time is limited.
To that end, the authors introduce [*Reward Reports for Reinforcement Learning*](https://www.notion.so/Brief-introduction-to-RL-documentation-b8cbda5a6f5242338e0756e6bef72af4) (the pithy naming is designed to mirror the popular papers *Model Cards for Model Reporting* and *Datasheets for Datasets*).
The goal is to propose a type of documentation focused on the **human factors of reward** and **time-varying feedback systems**.
Building on the documentation frameworks for [model cards](https://arxiv.org/abs/1810.03993) and [datasheets](https://arxiv.org/abs/1803.09010) proposed by Mitchell et al. and Gebru et al., we argue for the need for Reward Reports for AI systems.
**Reward Reports** are living documents for proposed RL deployments that demarcate design choices.
However, many questions remain about the applicability of this framework to different RL applications, roadblocks to system interpretability,
and the resonances between deployed supervised machine learning systems and the sequential decision-making utilized in RL.
At a minimum, Reward Reports are an opportunity for RL practitioners to deliberate on these questions and begin the work of deciding how to resolve them in practice.
## Capturing temporal behavior with documentation
The core piece specific to documentation designed for RL and feedback-driven ML systems is a *change log*. The change log records updates
from the designer (changed training parameters, data, etc.) along with changes noticed by the user (harmful behavior, unexpected responses, etc.).
The change log is accompanied by update triggers that encourage monitoring these effects.
## Contributing
Some of the most impactful RL-driven systems are multi-stakeholder in nature and behind the closed doors of private corporations.
These corporations are largely without regulation, so the burden of documentation falls on the public.
If you are interested in contributing, we are building Reward Reports for popular machine learning systems on a public
record on [GitHub](https://github.com/RewardReports/reward-reports).
For further reading, you can visit the Reward Reports [paper](https://arxiv.org/abs/2204.10817)
or look at [an example report](https://github.com/RewardReports/reward-reports/tree/main/examples).
## Author
This section was written by <a href="https://twitter.com/natolambert"> Nathan Lambert </a>
| deep-rl-class/units/en/unitbonus3/rl-documentation.mdx/0 | {
"file_path": "deep-rl-class/units/en/unitbonus3/rl-documentation.mdx",
"repo_id": "deep-rl-class",
"token_count": 886
} | 93 |
<!---
Copyright 2022 - The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<p align="center">
<br>
<img src="https://raw.githubusercontent.com/huggingface/diffusers/main/docs/source/en/imgs/diffusers_library.jpg" width="400"/>
<br>
</p>
<p align="center">
<a href="https://github.com/huggingface/diffusers/blob/main/LICENSE">
<img alt="GitHub" src="https://img.shields.io/github/license/huggingface/datasets.svg?color=blue">
</a>
<a href="https://github.com/huggingface/diffusers/releases">
<img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/diffusers.svg">
</a>
<a href="https://pepy.tech/project/diffusers">
<img alt="GitHub release" src="https://static.pepy.tech/badge/diffusers/month">
</a>
<a href="CODE_OF_CONDUCT.md">
<img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-2.1-4baaaa.svg">
</a>
<a href="https://twitter.com/diffuserslib">
<img alt="X account" src="https://img.shields.io/twitter/url/https/twitter.com/diffuserslib.svg?style=social&label=Follow%20%40diffuserslib">
</a>
</p>
🤗 Diffusers is the go-to library for state-of-the-art pretrained diffusion models for generating images, audio, and even 3D structures of molecules. Whether you're looking for a simple inference solution or training your own diffusion models, 🤗 Diffusers is a modular toolbox that supports both. Our library is designed with a focus on [usability over performance](https://huggingface.co/docs/diffusers/conceptual/philosophy#usability-over-performance), [simple over easy](https://huggingface.co/docs/diffusers/conceptual/philosophy#simple-over-easy), and [customizability over abstractions](https://huggingface.co/docs/diffusers/conceptual/philosophy#tweakable-contributorfriendly-over-abstraction).
🤗 Diffusers offers three core components:
- State-of-the-art [diffusion pipelines](https://huggingface.co/docs/diffusers/api/pipelines/overview) that can be run in inference with just a few lines of code.
- Interchangeable noise [schedulers](https://huggingface.co/docs/diffusers/api/schedulers/overview) for different diffusion speeds and output quality.
- Pretrained [models](https://huggingface.co/docs/diffusers/api/models/overview) that can be used as building blocks, and combined with schedulers, for creating your own end-to-end diffusion systems.
## Installation
We recommend installing 🤗 Diffusers in a virtual environment from PyPI or Conda. For more details about installing [PyTorch](https://pytorch.org/get-started/locally/) and [Flax](https://flax.readthedocs.io/en/latest/#installation), please refer to their official documentation.
### PyTorch
With `pip` (official package):
```bash
pip install --upgrade diffusers[torch]
```
With `conda` (maintained by the community):
```sh
conda install -c conda-forge diffusers
```
### Flax
With `pip` (official package):
```bash
pip install --upgrade diffusers[flax]
```
### Apple Silicon (M1/M2) support
Please refer to the [How to use Stable Diffusion in Apple Silicon](https://huggingface.co/docs/diffusers/optimization/mps) guide.
## Quickstart
Generating outputs is super easy with 🤗 Diffusers. To generate an image from text, use the `from_pretrained` method to load any pretrained diffusion model (browse the [Hub](https://huggingface.co/models?library=diffusers&sort=downloads) for 22000+ checkpoints):
```python
from diffusers import DiffusionPipeline
import torch
pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
pipeline.to("cuda")
pipeline("An image of a squirrel in Picasso style").images[0]
```
You can also dig into the models and schedulers toolbox to build your own diffusion system:
```python
from diffusers import DDPMScheduler, UNet2DModel
from PIL import Image
import torch
scheduler = DDPMScheduler.from_pretrained("google/ddpm-cat-256")
model = UNet2DModel.from_pretrained("google/ddpm-cat-256").to("cuda")
scheduler.set_timesteps(50)
sample_size = model.config.sample_size
noise = torch.randn((1, 3, sample_size, sample_size), device="cuda")
input = noise
for t in scheduler.timesteps:
    # Predict the noise residual for the current timestep.
    with torch.no_grad():
        noisy_residual = model(input, t).sample
    # Step the scheduler to obtain the slightly less noisy sample.
    prev_noisy_sample = scheduler.step(noisy_residual, t, input).prev_sample
    input = prev_noisy_sample
image = (input / 2 + 0.5).clamp(0, 1)
image = image.cpu().permute(0, 2, 3, 1).numpy()[0]
image = Image.fromarray((image * 255).round().astype("uint8"))
image
```
Check out the [Quickstart](https://huggingface.co/docs/diffusers/quicktour) to launch your diffusion journey today!
## How to navigate the documentation
| **Documentation** | **What can I learn?** |
|---------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| [Tutorial](https://huggingface.co/docs/diffusers/tutorials/tutorial_overview) | A basic crash course for learning how to use the library's most important features like using models and schedulers to build your own diffusion system, and training your own diffusion model. |
| [Loading](https://huggingface.co/docs/diffusers/using-diffusers/loading_overview) | Guides for how to load and configure all the components (pipelines, models, and schedulers) of the library, as well as how to use different schedulers. |
| [Pipelines for inference](https://huggingface.co/docs/diffusers/using-diffusers/pipeline_overview) | Guides for how to use pipelines for different inference tasks, batched generation, controlling generated outputs and randomness, and how to contribute a pipeline to the library. |
| [Optimization](https://huggingface.co/docs/diffusers/optimization/opt_overview) | Guides for how to optimize your diffusion model to run faster and consume less memory. |
| [Training](https://huggingface.co/docs/diffusers/training/overview) | Guides for how to train a diffusion model for different tasks with different training techniques. |
## Contribution
We ❤️ contributions from the open-source community!
If you want to contribute to this library, please check out our [Contribution guide](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md).
You can look out for [issues](https://github.com/huggingface/diffusers/issues) you'd like to tackle to contribute to the library.
- See [Good first issues](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) for general opportunities to contribute
- See [New model/pipeline](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22New+pipeline%2Fmodel%22) to contribute exciting new diffusion models / diffusion pipelines
- See [New scheduler](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22New+scheduler%22)
Also, say 👋 in our public Discord channel <a href="https://discord.gg/G7tWnz98XR"><img alt="Join us on Discord" src="https://img.shields.io/discord/823813159592001537?color=5865F2&logo=discord&logoColor=white"></a>. We discuss the hottest trends about diffusion models, help each other with contributions, personal projects or just hang out ☕.
## Popular Tasks & Pipelines
<table>
<tr>
<th>Task</th>
<th>Pipeline</th>
<th>🤗 Hub</th>
</tr>
<tr style="border-top: 2px solid black">
<td>Unconditional Image Generation</td>
<td><a href="https://huggingface.co/docs/diffusers/api/pipelines/ddpm"> DDPM </a></td>
<td><a href="https://huggingface.co/google/ddpm-ema-church-256"> google/ddpm-ema-church-256 </a></td>
</tr>
<tr style="border-top: 2px solid black">
<td>Text-to-Image</td>
<td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/text2img">Stable Diffusion Text-to-Image</a></td>
<td><a href="https://huggingface.co/runwayml/stable-diffusion-v1-5"> runwayml/stable-diffusion-v1-5 </a></td>
</tr>
<tr>
<td>Text-to-Image</td>
<td><a href="https://huggingface.co/docs/diffusers/api/pipelines/unclip">unCLIP</a></td>
<td><a href="https://huggingface.co/kakaobrain/karlo-v1-alpha"> kakaobrain/karlo-v1-alpha </a></td>
</tr>
<tr>
<td>Text-to-Image</td>
<td><a href="https://huggingface.co/docs/diffusers/api/pipelines/deepfloyd_if">DeepFloyd IF</a></td>
<td><a href="https://huggingface.co/DeepFloyd/IF-I-XL-v1.0"> DeepFloyd/IF-I-XL-v1.0 </a></td>
</tr>
<tr>
<td>Text-to-Image</td>
<td><a href="https://huggingface.co/docs/diffusers/api/pipelines/kandinsky">Kandinsky</a></td>
<td><a href="https://huggingface.co/kandinsky-community/kandinsky-2-2-decoder"> kandinsky-community/kandinsky-2-2-decoder </a></td>
</tr>
<tr style="border-top: 2px solid black">
<td>Text-guided Image-to-Image</td>
<td><a href="https://huggingface.co/docs/diffusers/api/pipelines/controlnet">ControlNet</a></td>
<td><a href="https://huggingface.co/lllyasviel/sd-controlnet-canny"> lllyasviel/sd-controlnet-canny </a></td>
</tr>
<tr>
<td>Text-guided Image-to-Image</td>
<td><a href="https://huggingface.co/docs/diffusers/api/pipelines/pix2pix">InstructPix2Pix</a></td>
<td><a href="https://huggingface.co/timbrooks/instruct-pix2pix"> timbrooks/instruct-pix2pix </a></td>
</tr>
<tr>
<td>Text-guided Image-to-Image</td>
<td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/img2img">Stable Diffusion Image-to-Image</a></td>
<td><a href="https://huggingface.co/runwayml/stable-diffusion-v1-5"> runwayml/stable-diffusion-v1-5 </a></td>
</tr>
<tr style="border-top: 2px solid black">
<td>Text-guided Image Inpainting</td>
<td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/inpaint">Stable Diffusion Inpainting</a></td>
<td><a href="https://huggingface.co/runwayml/stable-diffusion-inpainting"> runwayml/stable-diffusion-inpainting </a></td>
</tr>
<tr style="border-top: 2px solid black">
<td>Image Variation</td>
<td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/image_variation">Stable Diffusion Image Variation</a></td>
<td><a href="https://huggingface.co/lambdalabs/sd-image-variations-diffusers"> lambdalabs/sd-image-variations-diffusers </a></td>
</tr>
<tr style="border-top: 2px solid black">
<td>Super Resolution</td>
<td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/upscale">Stable Diffusion Upscale</a></td>
<td><a href="https://huggingface.co/stabilityai/stable-diffusion-x4-upscaler"> stabilityai/stable-diffusion-x4-upscaler </a></td>
</tr>
<tr>
<td>Super Resolution</td>
<td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/latent_upscale">Stable Diffusion Latent Upscale</a></td>
<td><a href="https://huggingface.co/stabilityai/sd-x2-latent-upscaler"> stabilityai/sd-x2-latent-upscaler </a></td>
</tr>
</table>
## Popular libraries using 🧨 Diffusers
- https://github.com/microsoft/TaskMatrix
- https://github.com/invoke-ai/InvokeAI
- https://github.com/apple/ml-stable-diffusion
- https://github.com/Sanster/lama-cleaner
- https://github.com/IDEA-Research/Grounded-Segment-Anything
- https://github.com/ashawkey/stable-dreamfusion
- https://github.com/deep-floyd/IF
- https://github.com/bentoml/BentoML
- https://github.com/bmaltais/kohya_ss
- +9000 other amazing GitHub repositories 💪
Thank you for using us ❤️.
## Credits
This library concretizes previous work by many different authors and would not have been possible without their great research and implementations. We'd like to thank, in particular, the following implementations which have helped us in our development and without which the API could not have been as polished today:
- @CompVis' latent diffusion models library, available [here](https://github.com/CompVis/latent-diffusion)
- @hojonathanho's original DDPM implementation, available [here](https://github.com/hojonathanho/diffusion), as well as the extremely useful translation into PyTorch by @pesser, available [here](https://github.com/pesser/pytorch_diffusion)
- @ermongroup's DDIM implementation, available [here](https://github.com/ermongroup/ddim)
- @yang-song's Score-VE and Score-VP implementations, available [here](https://github.com/yang-song/score_sde_pytorch)
We also want to thank @heejkoo for the very helpful overview of papers, code and resources on diffusion models, available [here](https://github.com/heejkoo/Awesome-Diffusion-Models) as well as @crowsonkb and @rromb for useful discussions and insights.
## Citation
```bibtex
@misc{von-platen-etal-2022-diffusers,
author = {Patrick von Platen and Suraj Patil and Anton Lozhkov and Pedro Cuenca and Nathan Lambert and Kashif Rasul and Mishig Davaadorj and Dhruv Nair and Sayak Paul and William Berman and Yiyi Xu and Steven Liu and Thomas Wolf},
title = {Diffusers: State-of-the-art diffusion models},
year = {2022},
publisher = {GitHub},
journal = {GitHub repository},
howpublished = {\url{https://github.com/huggingface/diffusers}}
}
```
| diffusers/README.md/0 | {
"file_path": "diffusers/README.md",
"repo_id": "diffusers",
"token_count": 5416
} | 94 |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# PEFT
Diffusers supports loading adapters such as [LoRA](../../using-diffusers/loading_adapters) with the [PEFT](https://huggingface.co/docs/peft/index) library via the [`~loaders.peft.PeftAdapterMixin`] class. This allows modeling classes in Diffusers like [`UNet2DConditionModel`] to load an adapter.
<Tip>
Refer to the [Inference with PEFT](../../tutorials/using_peft_for_inference.md) tutorial for an overview of how to use PEFT in Diffusers for inference.
</Tip>
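As a rough sketch of what the mixin enables, the snippet below creates a PEFT `LoraConfig` and attaches it to a [`UNet2DConditionModel`] through `add_adapter`. The target module names and hyperparameters are illustrative assumptions, not recommended values:

```py
from diffusers import UNet2DConditionModel
from peft import LoraConfig

# Load only the UNet from a Stable Diffusion checkpoint.
unet = UNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="unet")

# Illustrative LoRA configuration targeting the attention projections.
lora_config = LoraConfig(r=4, lora_alpha=4, target_modules=["to_q", "to_k", "to_v", "to_out.0"])

# `add_adapter` is provided by PeftAdapterMixin and injects the LoRA layers into the model.
unet.add_adapter(lora_config)
```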
## PeftAdapterMixin
[[autodoc]] loaders.peft.PeftAdapterMixin
| diffusers/docs/source/en/api/loaders/peft.md/0 | {
"file_path": "diffusers/docs/source/en/api/loaders/peft.md",
"repo_id": "diffusers",
"token_count": 320
} | 95 |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Paint by Example
[Paint by Example: Exemplar-based Image Editing with Diffusion Models](https://huggingface.co/papers/2211.13227) is by Binxin Yang, Shuyang Gu, Bo Zhang, Ting Zhang, Xuejin Chen, Xiaoyan Sun, Dong Chen, Fang Wen.
The abstract from the paper is:
*Language-guided image editing has achieved great success recently. In this paper, for the first time, we investigate exemplar-guided image editing for more precise control. We achieve this goal by leveraging self-supervised training to disentangle and re-organize the source image and the exemplar. However, the naive approach will cause obvious fusing artifacts. We carefully analyze it and propose an information bottleneck and strong augmentations to avoid the trivial solution of directly copying and pasting the exemplar image. Meanwhile, to ensure the controllability of the editing process, we design an arbitrary shape mask for the exemplar image and leverage the classifier-free guidance to increase the similarity to the exemplar image. The whole framework involves a single forward of the diffusion model without any iterative optimization. We demonstrate that our method achieves an impressive performance and enables controllable editing on in-the-wild images with high fidelity.*
The original codebase can be found at [Fantasy-Studio/Paint-by-Example](https://github.com/Fantasy-Studio/Paint-by-Example), and you can try it out in a [demo](https://huggingface.co/spaces/Fantasy-Studio/Paint-by-Example).
## Tips
Paint by Example is supported by the official [Fantasy-Studio/Paint-by-Example](https://huggingface.co/Fantasy-Studio/Paint-by-Example) checkpoint. The checkpoint is warm-started from [CompVis/stable-diffusion-v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4) to inpaint partly masked images conditioned on example and reference images.
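A condensed usage sketch is shown below; the image paths are placeholders for an input image, the mask covering the region to edit, and the exemplar image that should appear in the masked region:

```py
import torch
from diffusers import PaintByExamplePipeline
from diffusers.utils import load_image

pipe = PaintByExamplePipeline.from_pretrained(
    "Fantasy-Studio/Paint-by-Example", torch_dtype=torch.float16
).to("cuda")

# Placeholder paths for the input image, mask, and exemplar image.
init_image = load_image("input.png")
mask_image = load_image("mask.png")
example_image = load_image("example.png")

image = pipe(image=init_image, mask_image=mask_image, example_image=example_image).images[0]
```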
<Tip>
Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines.
</Tip>
## PaintByExamplePipeline
[[autodoc]] PaintByExamplePipeline
- all
- __call__
## StableDiffusionPipelineOutput
[[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput
| diffusers/docs/source/en/api/pipelines/paint_by_example.md/0 | {
"file_path": "diffusers/docs/source/en/api/pipelines/paint_by_example.md",
"repo_id": "diffusers",
"token_count": 762
} | 96 |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Latent Consistency Model Multistep Scheduler
## Overview
Multistep and onestep scheduler (Algorithm 3) introduced alongside latent consistency models in the paper [Latent Consistency Models: Synthesizing High-Resolution Images with Few-Step Inference](https://arxiv.org/abs/2310.04378) by Simian Luo, Yiqin Tan, Longbo Huang, Jian Li, and Hang Zhao.
This scheduler should be able to generate good samples from [`LatentConsistencyModelPipeline`] in 1-8 steps.
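As an illustrative sketch of how the scheduler is typically used (the model id below is an assumption; any pipeline compatible with `LCMScheduler` should work similarly), you can swap it in via `from_config` and sample with only a few steps:

```py
import torch
from diffusers import DiffusionPipeline, LCMScheduler

# Assumed latent consistency model checkpoint.
pipe = DiffusionPipeline.from_pretrained("SimianLuo/LCM_Dreamshaper_v7", torch_dtype=torch.float16)
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
pipe.to("cuda")

# 1-8 inference steps are usually enough with this scheduler.
image = pipe("a photo of an astronaut riding a horse", num_inference_steps=4, guidance_scale=8.0).images[0]
```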
## LCMScheduler
[[autodoc]] LCMScheduler
| diffusers/docs/source/en/api/schedulers/lcm.md/0 | {
"file_path": "diffusers/docs/source/en/api/schedulers/lcm.md",
"repo_id": "diffusers",
"token_count": 291
} | 97 |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# 🧨 Diffusers’ Ethical Guidelines
## Preamble
[Diffusers](https://huggingface.co/docs/diffusers/index) provides pre-trained diffusion models and serves as a modular toolbox for inference and training.
Given its real case applications in the world and potential negative impacts on society, we think it is important to provide the project with ethical guidelines to guide the development, users’ contributions, and usage of the Diffusers library.
The risks associated with using this technology are still being examined, but to name a few: copyrights issues for artists; deep-fake exploitation; sexual content generation in inappropriate contexts; non-consensual impersonation; harmful social biases perpetuating the oppression of marginalized groups.
We will keep tracking risks and adapt the following guidelines based on the community's responsiveness and valuable feedback.
## Scope
The Diffusers community will apply the following ethical guidelines to the project’s development and help coordinate how the community will integrate the contributions, especially concerning sensitive topics related to ethical concerns.
## Ethical guidelines
The following ethical guidelines apply generally, but we will primarily implement them when dealing with ethically sensitive issues while making a technical choice. Furthermore, we commit to adapting those ethical principles over time following emerging harms related to the state of the art of the technology in question.
- **Transparency**: we are committed to being transparent in managing PRs, explaining our choices to users, and making technical decisions.
- **Consistency**: we are committed to guaranteeing our users the same level of attention in project management, keeping it technically stable and consistent.
- **Simplicity**: with a desire to make it easy to use and exploit the Diffusers library, we are committed to keeping the project’s goals lean and coherent.
- **Accessibility**: the Diffusers project helps lower the entry bar for contributors who can help run it even without technical expertise. Doing so makes research artifacts more accessible to the community.
- **Reproducibility**: we aim to be transparent about the reproducibility of upstream code, models, and datasets when made available through the Diffusers library.
- **Responsibility**: as a community and through teamwork, we hold a collective responsibility to our users by anticipating and mitigating this technology's potential risks and dangers.
## Examples of implementations: Safety features and Mechanisms
The team works daily to make the technical and non-technical tools available to deal with the potential ethical and social risks associated with diffusion technology. Moreover, the community's input is invaluable in ensuring these features' implementation and raising awareness with us.
- [**Community tab**](https://huggingface.co/docs/hub/repositories-pull-requests-discussions): it enables the community to discuss and better collaborate on a project.
- **Bias exploration and evaluation**: the Hugging Face team provides a [space](https://huggingface.co/spaces/society-ethics/DiffusionBiasExplorer) to demonstrate the biases in Stable Diffusion interactively. In this sense, we support and encourage bias explorers and evaluations.
- **Encouraging safety in deployment**
- [**Safe Stable Diffusion**](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/stable_diffusion_safe): It mitigates the well-known issue that models, like Stable Diffusion, that are trained on unfiltered, web-crawled datasets tend to suffer from inappropriate degeneration. Related paper: [Safe Latent Diffusion: Mitigating Inappropriate Degeneration in Diffusion Models](https://arxiv.org/abs/2211.05105).
- [**Safety Checker**](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py): It checks and compares the class probability of a set of hard-coded harmful concepts in the embedding space against an image after it has been generated. The harmful concepts are intentionally hidden to prevent reverse engineering of the checker.
- **Staged released on the Hub**: in particularly sensitive situations, access to some repositories should be restricted. This staged release is an intermediary step that allows the repository’s authors to have more control over its use.
- **Licensing**: [OpenRAILs](https://huggingface.co/blog/open_rail), a new type of licensing, allow us to ensure free access while having a set of restrictions that ensure more responsible use.
| diffusers/docs/source/en/conceptual/ethical_guidelines.md/0 | {
"file_path": "diffusers/docs/source/en/conceptual/ethical_guidelines.md",
"repo_id": "diffusers",
"token_count": 1155
} | 98 |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Token merging
[Token merging](https://huggingface.co/papers/2303.17604) (ToMe) merges redundant tokens/patches progressively in the forward pass of a Transformer-based network which can speed-up the inference latency of [`StableDiffusionPipeline`].
Install ToMe with `pip`:
```bash
pip install tomesd
```
You can use ToMe from the [`tomesd`](https://github.com/dbolya/tomesd) library with the [`apply_patch`](https://github.com/dbolya/tomesd?tab=readme-ov-file#usage) function:
```diff
from diffusers import StableDiffusionPipeline
import torch
import tomesd
pipeline = StableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True,
).to("cuda")
+ tomesd.apply_patch(pipeline, ratio=0.5)
image = pipeline("a photo of an astronaut riding a horse on mars").images[0]
```
The `apply_patch` function exposes a number of [arguments](https://github.com/dbolya/tomesd#usage) to help strike a balance between pipeline inference speed and the quality of the generated tokens. The most important argument is `ratio` which controls the number of tokens that are merged during the forward pass.
As reported in the [paper](https://huggingface.co/papers/2303.17604), ToMe can greatly preserve the quality of the generated images while boosting inference speed. By increasing the `ratio`, you can speed-up inference even further, but at the cost of some degraded image quality.
To test the quality of the generated images, we sampled a few prompts from [Parti Prompts](https://parti.research.google/) and performed inference with the [`StableDiffusionPipeline`] using the following settings:
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/tome/tome_samples.png">
</div>
We didn’t notice any significant decrease in the quality of the generated samples, and you can check out the generated samples in this [WandB report](https://wandb.ai/sayakpaul/tomesd-results/runs/23j4bj3i?workspace=). If you're interested in reproducing this experiment, use this [script](https://gist.github.com/sayakpaul/8cac98d7f22399085a060992f411ecbd).
## Benchmarks
We also benchmarked the impact of `tomesd` on the [`StableDiffusionPipeline`] with [xFormers](https://huggingface.co/docs/diffusers/optimization/xformers) enabled across several image resolutions. The results are obtained from A100 and V100 GPUs in the following development environment:
```bash
- `diffusers` version: 0.15.1
- Python version: 3.8.16
- PyTorch version (GPU?): 1.13.1+cu116 (True)
- Huggingface_hub version: 0.13.2
- Transformers version: 4.27.2
- Accelerate version: 0.18.0
- xFormers version: 0.0.16
- tomesd version: 0.1.2
```
To reproduce this benchmark, feel free to use this [script](https://gist.github.com/sayakpaul/27aec6bca7eb7b0e0aa4112205850335). The results are reported in seconds, and where applicable we report the speed-up percentage over the vanilla pipeline when using ToMe and ToMe + xFormers.
| **GPU** | **Resolution** | **Batch size** | **Vanilla** | **ToMe** | **ToMe + xFormers** |
|----------|----------------|----------------|-------------|----------------|---------------------|
| **A100** | 512 | 10 | 6.88 | 5.26 (+23.55%) | 4.69 (+31.83%) |
| | 768 | 10 | OOM | 14.71 | 11 |
| | | 8 | OOM | 11.56 | 8.84 |
| | | 4 | OOM | 5.98 | 4.66 |
| | | 2 | 4.99 | 3.24 (+35.07%) | 2.1 (+37.88%) |
| | | 1 | 3.29 | 2.24 (+31.91%) | 2.03 (+38.3%) |
| | 1024 | 10 | OOM | OOM | OOM |
| | | 8 | OOM | OOM | OOM |
| | | 4 | OOM | 12.51 | 9.09 |
| | | 2 | OOM | 6.52 | 4.96 |
| | | 1 | 6.4 | 3.61 (+43.59%) | 2.81 (+56.09%) |
| **V100** | 512 | 10 | OOM | 10.03 | 9.29 |
| | | 8 | OOM | 8.05 | 7.47 |
| | | 4 | 5.7 | 4.3 (+24.56%) | 3.98 (+30.18%) |
| | | 2 | 3.14 | 2.43 (+22.61%) | 2.27 (+27.71%) |
| | | 1 | 1.88 | 1.57 (+16.49%) | 1.57 (+16.49%) |
| | 768 | 10 | OOM | OOM | 23.67 |
| | | 8 | OOM | OOM | 18.81 |
| | | 4 | OOM | 11.81 | 9.7 |
| | | 2 | OOM | 6.27 | 5.2 |
| | | 1 | 5.43 | 3.38 (+37.75%) | 2.82 (+48.07%) |
| | 1024 | 10 | OOM | OOM | OOM |
| | | 8 | OOM | OOM | OOM |
| | | 4 | OOM | OOM | 19.35 |
| | | 2 | OOM | 13 | 10.78 |
| | | 1 | OOM | 6.66 | 5.54 |
As seen in the tables above, the speed-up from `tomesd` becomes more pronounced for larger image resolutions. It is also interesting to note that with `tomesd`, it is possible to run the pipeline on a higher resolution like 1024x1024. You may be able to speed-up inference even more with [`torch.compile`](torch2.0).
| diffusers/docs/source/en/optimization/tome.md/0 | {
"file_path": "diffusers/docs/source/en/optimization/tome.md",
"repo_id": "diffusers",
"token_count": 3379
} | 99 |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Overview
🤗 Diffusers provides a collection of training scripts for you to train your own diffusion models. You can find all of our training scripts in [diffusers/examples](https://github.com/huggingface/diffusers/tree/main/examples).
Each training script is:
- **Self-contained**: the training script does not depend on any local files, and all packages required to run the script are installed from the `requirements.txt` file.
- **Easy-to-tweak**: the training scripts are an example of how to train a diffusion model for a specific task and won't work out-of-the-box for every training scenario. You'll likely need to adapt the training script for your specific use-case. To help you with that, we've fully exposed the data preprocessing code and the training loop so you can modify it for your own use.
- **Beginner-friendly**: the training scripts are designed to be beginner-friendly and easy to understand, rather than including the latest state-of-the-art methods to get the best and most competitive results. Any training methods we consider too complex are purposefully left out.
- **Single-purpose**: each training script is expressly designed for only one task to keep it readable and understandable.
Our current collection of training scripts include:
| Training | SDXL-support | LoRA-support | Flax-support |
|---|---|---|---|
| [unconditional image generation](https://github.com/huggingface/diffusers/tree/main/examples/unconditional_image_generation) [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb) | | | |
| [text-to-image](https://github.com/huggingface/diffusers/tree/main/examples/text_to_image) | 👍 | 👍 | 👍 |
| [textual inversion](https://github.com/huggingface/diffusers/tree/main/examples/textual_inversion) [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb) | | | 👍 |
| [DreamBooth](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth) [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb) | 👍 | 👍 | 👍 |
| [ControlNet](https://github.com/huggingface/diffusers/tree/main/examples/controlnet) | 👍 | | 👍 |
| [InstructPix2Pix](https://github.com/huggingface/diffusers/tree/main/examples/instruct_pix2pix) | 👍 | | |
| [Custom Diffusion](https://github.com/huggingface/diffusers/tree/main/examples/custom_diffusion) | | | |
| [T2I-Adapters](https://github.com/huggingface/diffusers/tree/main/examples/t2i_adapter) | 👍 | | |
| [Kandinsky 2.2](https://github.com/huggingface/diffusers/tree/main/examples/kandinsky2_2/text_to_image) | | 👍 | |
| [Wuerstchen](https://github.com/huggingface/diffusers/tree/main/examples/wuerstchen/text_to_image) | | 👍 | |
These examples are **actively** maintained, so please feel free to open an issue if they aren't working as expected. If you feel like another training example should be included, you're more than welcome to start a [Feature Request](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feature_request.md&title=) to discuss your feature idea with us and whether it meets our criteria of being self-contained, easy-to-tweak, beginner-friendly, and single-purpose.
## Install
Make sure you can successfully run the latest versions of the example scripts by installing the library from source in a new virtual environment:
```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install .
```
Then navigate to the folder of the training script (for example, [DreamBooth](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth)) and install the `requirements.txt` file. Some training scripts have a specific requirement file for SDXL, LoRA or Flax. If you're using one of these scripts, make sure you install its corresponding requirements file.
```bash
cd examples/dreambooth
pip install -r requirements.txt
# to train SDXL with DreamBooth
pip install -r requirements_sdxl.txt
```
To speed up training and reduce memory usage, we recommend:
- using PyTorch 2.0 or higher to automatically use [scaled dot product attention](../optimization/torch2.0#scaled-dot-product-attention) during training (you don't need to make any changes to the training code)
- installing [xFormers](../optimization/xformers) to enable memory-efficient attention | diffusers/docs/source/en/training/overview.md/0 | {
"file_path": "diffusers/docs/source/en/training/overview.md",
"repo_id": "diffusers",
"token_count": 1545
} | 100 |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Controlled generation
Controlling outputs generated by diffusion models has been long pursued by the community and is now an active research topic. In many popular diffusion models, subtle changes in inputs, both images and text prompts, can drastically change outputs. In an ideal world we want to be able to control how semantics are preserved and changed.
Most examples of preserving semantics reduce to being able to accurately map a change in input to a change in output. I.e. adding an adjective to a subject in a prompt preserves the entire image, only modifying the changed subject. Or, image variation of a particular subject preserves the subject's pose.
Additionally, there are qualities of generated images that we would like to influence beyond semantic preservation. I.e. in general, we would like our outputs to be of good quality, adhere to a particular style, or be realistic.
We will document some of the techniques `diffusers` supports to control generation of diffusion models. Much of this is cutting-edge research and can be quite nuanced. If something needs clarifying or you have a suggestion, don't hesitate to open a discussion on the [forum](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/63) or a [GitHub issue](https://github.com/huggingface/diffusers/issues).
We provide a high-level explanation of how generation can be controlled as well as a snippet of the technical details. For more in-depth explanations, the original papers linked from the pipelines are always the best resource.
Depending on the use case, one should choose a technique accordingly. In many cases, these techniques can be combined. For example, one can combine Textual Inversion with SEGA to provide more semantic guidance to the outputs generated using Textual Inversion.
Unless otherwise mentioned, these are techniques that work with existing models and don't require their own weights.
1. [InstructPix2Pix](#instruct-pix2pix)
2. [Pix2Pix Zero](#pix2pix-zero)
3. [Attend and Excite](#attend-and-excite)
4. [Semantic Guidance](#semantic-guidance-sega)
5. [Self-attention Guidance](#self-attention-guidance-sag)
6. [Depth2Image](#depth2image)
7. [MultiDiffusion Panorama](#multidiffusion-panorama)
8. [DreamBooth](#dreambooth)
9. [Textual Inversion](#textual-inversion)
10. [ControlNet](#controlnet)
11. [Prompt Weighting](#prompt-weighting)
12. [Custom Diffusion](#custom-diffusion)
13. [Model Editing](#model-editing)
14. [DiffEdit](#diffedit)
15. [T2I-Adapter](#t2i-adapter)
16. [FABRIC](#fabric)
For convenience, we provide a table to denote which methods are inference-only and which require fine-tuning/training.
| **Method** | **Inference only** | **Requires training /<br> fine-tuning** | **Comments** |
| :-------------------------------------------------: | :----------------: | :-------------------------------------: | :---------------------------------------------------------------------------------------------: |
| [InstructPix2Pix](#instruct-pix2pix) | ✅ | ❌ | Can additionally be<br>fine-tuned for better <br>performance on specific <br>edit instructions. |
| [Pix2Pix Zero](#pix2pix-zero) | ✅ | ❌ | |
| [Attend and Excite](#attend-and-excite) | ✅ | ❌ | |
| [Semantic Guidance](#semantic-guidance-sega) | ✅ | ❌ | |
| [Self-attention Guidance](#self-attention-guidance-sag) | ✅ | ❌ | |
| [Depth2Image](#depth2image) | ✅ | ❌ | |
| [MultiDiffusion Panorama](#multidiffusion-panorama) | ✅ | ❌ | |
| [DreamBooth](#dreambooth) | ❌ | ✅ | |
| [Textual Inversion](#textual-inversion) | ❌ | ✅ | |
| [ControlNet](#controlnet) | ✅ | ❌ | A ControlNet can be <br>trained/fine-tuned on<br>a custom conditioning. |
| [Prompt Weighting](#prompt-weighting) | ✅ | ❌ | |
| [Custom Diffusion](#custom-diffusion) | ❌ | ✅ | |
| [Model Editing](#model-editing) | ✅ | ❌ | |
| [DiffEdit](#diffedit) | ✅ | ❌ | |
| [T2I-Adapter](#t2i-adapter) | ✅ | ❌ | |
| [Fabric](#fabric) | ✅ | ❌ | |
## InstructPix2Pix
[Paper](https://arxiv.org/abs/2211.09800)
[InstructPix2Pix](../api/pipelines/pix2pix) is fine-tuned from Stable Diffusion to support editing input images. It takes as inputs an image and a prompt describing an edit, and it outputs the edited image.
InstructPix2Pix has been explicitly trained to work well with [InstructGPT](https://openai.com/blog/instruction-following/)-like prompts.
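A minimal sketch of how this might look with the checkpoint released by the paper's authors (the input file name is a placeholder):

```py
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", torch_dtype=torch.float16
).to("cuda")

image = load_image("input.png")  # placeholder path for the image to edit
edited = pipe(
    "turn the sky into a dramatic sunset",  # the edit instruction
    image=image,
    num_inference_steps=20,
    image_guidance_scale=1.5,  # how strongly to stay close to the input image
).images[0]
```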
## Pix2Pix Zero
[Paper](https://arxiv.org/abs/2302.03027)
[Pix2Pix Zero](../api/pipelines/pix2pix_zero) allows modifying an image so that one concept or subject is translated to another one while preserving general image semantics.
The denoising process is guided from one conceptual embedding towards another conceptual embedding. The intermediate latents are optimized during the denoising process to push the attention maps towards reference attention maps. The reference attention maps are from the denoising process of the input image and are used to encourage semantic preservation.
Pix2Pix Zero can be used both to edit synthetic images as well as real images.
- To edit synthetic images, one first generates an image given a caption.
Next, we generate image captions for the concept that shall be edited and for the new target concept. We can use a model like [Flan-T5](https://huggingface.co/docs/transformers/model_doc/flan-t5) for this purpose. Then, "mean" prompt embeddings for both the source and target concepts are created via the text encoder. Finally, the pix2pix-zero algorithm is used to edit the synthetic image.
- To edit a real image, one first generates an image caption using a model like [BLIP](https://huggingface.co/docs/transformers/model_doc/blip). Then one applies DDIM inversion on the prompt and image to generate "inverse" latents. Similar to before, "mean" prompt embeddings for both source and target concepts are created and finally the pix2pix-zero algorithm in combination with the "inverse" latents is used to edit the image.
<Tip>
Pix2Pix Zero is the first model that allows "zero-shot" image editing. This means that the model
can edit an image in less than a minute on a consumer GPU as shown [here](../api/pipelines/pix2pix_zero#usage-example).
</Tip>
As mentioned above, Pix2Pix Zero includes optimizing the latents (and not any of the UNet, VAE, or the text encoder) to steer the generation toward a specific concept. This means that the overall
pipeline might require more memory than a standard [StableDiffusionPipeline](../api/pipelines/stable_diffusion/text2img).
<Tip>
An important distinction between methods like InstructPix2Pix and Pix2Pix Zero is that the former
involves fine-tuning the pre-trained weights while the latter does not. This means that you can
apply Pix2Pix Zero to any of the available Stable Diffusion models.
</Tip>
## Attend and Excite
[Paper](https://arxiv.org/abs/2301.13826)
[Attend and Excite](../api/pipelines/attend_and_excite) allows subjects in the prompt to be faithfully represented in the final image.
A set of token indices are given as input, corresponding to the subjects in the prompt that need to be present in the image. During denoising, each token index is guaranteed to have a minimum attention threshold for at least one patch of the image. The intermediate latents are iteratively optimized during the denoising process to strengthen the attention of the most neglected subject token until the attention threshold is passed for all subject tokens.
Like Pix2Pix Zero, Attend and Excite also involves a mini optimization loop (leaving the pre-trained weights untouched) in its pipeline and can require more memory than the usual [StableDiffusionPipeline](../api/pipelines/stable_diffusion/text2img).
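A rough usage sketch, assuming the dedicated Attend and Excite pipeline; the token indices below are the positions of "cat" and "frog" in the tokenized prompt:

```py
import torch
from diffusers import StableDiffusionAttendAndExcitePipeline

pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
).to("cuda")

prompt = "a cat and a frog"
# Indices of the subject tokens that must be attended to during denoising.
image = pipe(prompt, token_indices=[2, 5], guidance_scale=7.5).images[0]
```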
## Semantic Guidance (SEGA)
[Paper](https://arxiv.org/abs/2301.12247)
[SEGA](../api/pipelines/semantic_stable_diffusion) allows applying or removing one or more concepts from an image. The strength of the concept can also be controlled. I.e. the smile concept can be used to incrementally increase or decrease the smile of a portrait.
Similar to how classifier free guidance provides guidance via empty prompt inputs, SEGA provides guidance on conceptual prompts. Multiple of these conceptual prompts can be applied simultaneously. Each conceptual prompt can either add or remove their concept depending on if the guidance is applied positively or negatively.
Unlike Pix2Pix Zero or Attend and Excite, SEGA directly interacts with the diffusion process instead of performing any explicit gradient-based optimization.
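A minimal sketch of applying a single concept, assuming the semantic guidance pipeline and illustrative guidance values:

```py
import torch
from diffusers import SemanticStableDiffusionPipeline

pipe = SemanticStableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

out = pipe(
    "a photo of the face of a woman",
    editing_prompt=["smiling, smile"],  # concept to apply
    reverse_editing_direction=[False],  # False adds the concept, True removes it
    edit_guidance_scale=[5.0],          # illustrative strength
)
image = out.images[0]
```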
## Self-attention Guidance (SAG)
[Paper](https://arxiv.org/abs/2210.00939)
[Self-attention Guidance](../api/pipelines/self_attention_guidance) improves the general quality of images.
SAG provides guidance from predictions not conditioned on high-frequency details to fully conditioned images. The high frequency details are extracted out of the UNet self-attention maps.
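A brief sketch, assuming the dedicated SAG pipeline and its `sag_scale` argument:

```py
import torch
from diffusers import StableDiffusionSAGPipeline

pipe = StableDiffusionSAGPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
).to("cuda")

# sag_scale controls the strength of the self-attention guidance (0 disables it).
image = pipe("a photo of an astronaut riding a horse", sag_scale=0.75).images[0]
```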
## Depth2Image
[Project](https://huggingface.co/stabilityai/stable-diffusion-2-depth)
[Depth2Image](../api/pipelines/stable_diffusion/depth2img) is fine-tuned from Stable Diffusion to better preserve semantics for text-guided image variation.
It conditions on a monocular depth estimate of the original image.
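A minimal sketch, assuming the depth-conditioned checkpoint and a placeholder input image:

```py
import torch
from diffusers import StableDiffusionDepth2ImgPipeline
from diffusers.utils import load_image

pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-depth", torch_dtype=torch.float16
).to("cuda")

init_image = load_image("room.png")  # placeholder path
# The pipeline estimates a depth map from the input image and conditions on it.
image = pipe(prompt="a cozy wooden library", image=init_image, strength=0.7).images[0]
```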
## MultiDiffusion Panorama
[Paper](https://arxiv.org/abs/2302.08113)
[MultiDiffusion Panorama](../api/pipelines/panorama) defines a new generation process over a pre-trained diffusion model. This process binds together multiple diffusion generation methods that can be readily applied to generate high quality and diverse images. Results adhere to user-provided controls, such as desired aspect ratio (e.g., panorama), and spatial guiding signals, ranging from tight segmentation masks to bounding boxes.
MultiDiffusion Panorama makes it possible to generate high-quality images at arbitrary aspect ratios (e.g., panoramas).
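A short sketch, assuming the panorama pipeline with a DDIM scheduler and a wide output resolution:

```py
import torch
from diffusers import StableDiffusionPanoramaPipeline, DDIMScheduler

model_id = "stabilityai/stable-diffusion-2-base"
scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
pipe = StableDiffusionPanoramaPipeline.from_pretrained(
    model_id, scheduler=scheduler, torch_dtype=torch.float16
).to("cuda")

# A wide width produces a panorama-style image.
image = pipe("a photo of the dolomites", height=512, width=2048).images[0]
```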
## Fine-tuning your own models
In addition to pre-trained models, Diffusers has training scripts for fine-tuning models on user-provided data.
## DreamBooth
[Project](https://dreambooth.github.io/)
[DreamBooth](../training/dreambooth) fine-tunes a model to teach it about a new subject. I.e. a few pictures of a person can be used to generate images of that person in different styles.
## Textual Inversion
[Paper](https://arxiv.org/abs/2208.01618)
[Textual Inversion](../training/text_inversion) fine-tunes a model to teach it about a new concept. I.e. a few pictures of a style of artwork can be used to generate images in that style.
## ControlNet
[Paper](https://arxiv.org/abs/2302.05543)
[ControlNet](../api/pipelines/controlnet) is an auxiliary network which adds an extra condition.
There are 8 canonical pre-trained ControlNets trained on different conditionings such as edge detection, scribbles,
depth maps, and semantic segmentations.
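A condensed sketch, assuming a Canny-edge ControlNet and a pre-computed edge map as the conditioning image (the file name is a placeholder):

```py
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
from diffusers.utils import load_image

controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
).to("cuda")

canny_image = load_image("canny_edges.png")  # placeholder: a pre-computed Canny edge map
image = pipe("a futuristic city at night", image=canny_image).images[0]
```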
## Prompt Weighting
[Prompt weighting](../using-diffusers/weighted_prompts) is a simple technique that puts more attention weight on certain parts of the text
input.
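One common way to do this is with the third-party `compel` library; the sketch below assumes it is installed (`pip install compel`) and uses its `++` syntax to upweight the preceding word:

```py
import torch
from compel import Compel
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

compel_proc = Compel(tokenizer=pipe.tokenizer, text_encoder=pipe.text_encoder)
prompt_embeds = compel_proc("a red cat++ playing with a ball")  # "++" upweights "cat"

image = pipe(prompt_embeds=prompt_embeds).images[0]
```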
## Custom Diffusion
[Paper](https://arxiv.org/abs/2212.04488)
[Custom Diffusion](../training/custom_diffusion) only fine-tunes the cross-attention maps of a pre-trained
text-to-image diffusion model. It also allows for additionally performing Textual Inversion. It supports
multi-concept training by design. Like DreamBooth and Textual Inversion, Custom Diffusion is also used to
teach a pre-trained text-to-image diffusion model about new concepts to generate outputs involving the
concept(s) of interest.
## Model Editing
[Paper](https://arxiv.org/abs/2303.08084)
The [text-to-image model editing pipeline](../api/pipelines/model_editing) helps you mitigate some of the incorrect implicit assumptions a pre-trained text-to-image
diffusion model might make about the subjects present in the input prompt. For example, if you prompt Stable Diffusion to generate images for "A pack of roses", the roses in the generated images
are more likely to be red. This pipeline helps you change that assumption.
## DiffEdit
[Paper](https://arxiv.org/abs/2210.11427)
[DiffEdit](../api/pipelines/diffedit) allows for semantic editing of input images along with
input prompts while preserving the original input images as much as possible.
## T2I-Adapter
[Paper](https://arxiv.org/abs/2302.08453)
[T2I-Adapter](../api/pipelines/stable_diffusion/adapter) is an auxiliary network which adds an extra condition.
There are 8 canonical pre-trained adapters trained on different conditionings such as edge detection, sketch,
depth maps, and semantic segmentations.
## Fabric
[Paper](https://arxiv.org/abs/2307.10159)
[Fabric](https://github.com/huggingface/diffusers/tree/442017ccc877279bcf24fbe92f92d3d0def191b6/examples/community#stable-diffusion-fabric-pipeline) is a training-free
approach applicable to a wide range of popular diffusion models, which exploits
the self-attention layer present in the most widely used architectures to condition
the diffusion process on a set of feedback images.
| diffusers/docs/source/en/using-diffusers/controlling_generation.md/0 | {
"file_path": "diffusers/docs/source/en/using-diffusers/controlling_generation.md",
"repo_id": "diffusers",
"token_count": 6311
} | 101 |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Load adapters
[[open-in-colab]]
There are several [training](../training/overview) techniques for personalizing diffusion models to generate images of a specific subject or images in certain styles. Each of these training methods produces a different type of adapter. Some of the adapters generate an entirely new model, while other adapters only modify a smaller set of embeddings or weights. This means the loading process for each adapter is also different.
This guide will show you how to load DreamBooth, textual inversion, and LoRA weights.
<Tip>
Feel free to browse the [Stable Diffusion Conceptualizer](https://huggingface.co/spaces/sd-concepts-library/stable-diffusion-conceptualizer), [LoRA the Explorer](https://huggingface.co/spaces/multimodalart/LoraTheExplorer), and the [Diffusers Models Gallery](https://huggingface.co/spaces/huggingface-projects/diffusers-gallery) for checkpoints and embeddings to use.
</Tip>
## DreamBooth
[DreamBooth](https://dreambooth.github.io/) finetunes an *entire diffusion model* on just several images of a subject to generate images of that subject in new styles and settings. This method works by using a special word in the prompt that the model learns to associate with the subject image. Of all the training methods, DreamBooth produces the largest file size (usually a few GBs) because it is a full checkpoint model.
Let's load the [herge_style](https://huggingface.co/sd-dreambooth-library/herge-style) checkpoint, which is trained on just 10 images drawn by Hergé, to generate images in that style. For it to work, you need to include the special word `herge_style` in your prompt to trigger the checkpoint:
```py
from diffusers import AutoPipelineForText2Image
import torch
pipeline = AutoPipelineForText2Image.from_pretrained("sd-dreambooth-library/herge-style", torch_dtype=torch.float16).to("cuda")
prompt = "A cute herge_style brown bear eating a slice of pizza, stunning color scheme, masterpiece, illustration"
image = pipeline(prompt).images[0]
image
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/load_dreambooth.png" />
</div>
## Textual inversion
[Textual inversion](https://textual-inversion.github.io/) is very similar to DreamBooth and it can also personalize a diffusion model to generate certain concepts (styles, objects) from just a few images. This method works by training and finding new embeddings that represent the images you provide with a special word in the prompt. As a result, the diffusion model weights stay the same and the training process produces a relatively tiny (a few KBs) file.
Because textual inversion creates embeddings, it cannot be used on its own like DreamBooth and requires another model.
```py
from diffusers import AutoPipelineForText2Image
import torch
pipeline = AutoPipelineForText2Image.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16).to("cuda")
```
Now you can load the textual inversion embeddings with the [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] method and generate some images. Let's load the [sd-concepts-library/gta5-artwork](https://huggingface.co/sd-concepts-library/gta5-artwork) embeddings and you'll need to include the special word `<gta5-artwork>` in your prompt to trigger it:
```py
pipeline.load_textual_inversion("sd-concepts-library/gta5-artwork")
prompt = "A cute brown bear eating a slice of pizza, stunning color scheme, masterpiece, illustration, <gta5-artwork> style"
image = pipeline(prompt).images[0]
image
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/load_txt_embed.png" />
</div>
Textual inversion can also be trained on undesirable things to create *negative embeddings* to discourage a model from generating images with those undesirable things like blurry images or extra fingers on a hand. This can be an easy way to quickly improve your prompt. You'll also load the embeddings with [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`], but this time, you'll need two more parameters:
- `weight_name`: specifies the weight file to load if the file was saved in the 🤗 Diffusers format with a specific name or if the file is stored in the A1111 format
- `token`: specifies the special word to use in the prompt to trigger the embeddings
Let's load the [sayakpaul/EasyNegative-test](https://huggingface.co/sayakpaul/EasyNegative-test) embeddings:
```py
pipeline.load_textual_inversion(
"sayakpaul/EasyNegative-test", weight_name="EasyNegative.safetensors", token="EasyNegative"
)
```
Now you can use the `token` to generate an image with the negative embeddings:
```py
prompt = "A cute brown bear eating a slice of pizza, stunning color scheme, masterpiece, illustration, EasyNegative"
negative_prompt = "EasyNegative"
image = pipeline(prompt, negative_prompt=negative_prompt, num_inference_steps=50).images[0]
image
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/load_neg_embed.png" />
</div>
## LoRA
[Low-Rank Adaptation (LoRA)](https://huggingface.co/papers/2106.09685) is a popular training technique because it is fast and generates smaller file sizes (a couple hundred MBs). Like the other methods in this guide, LoRA can train a model to learn new styles from just a few images. It works by inserting new weights into the diffusion model and then only the new weights are trained instead of the entire model. This makes LoRAs faster to train and easier to store.
<Tip>
LoRA is a very general training technique that can be used with other training methods. For example, it is common to train a model with DreamBooth and LoRA. It is also increasingly common to load and merge multiple LoRAs to create new and unique images. You can learn more about it in the in-depth [Merge LoRAs](merge_loras) guide since merging is outside the scope of this loading guide.
</Tip>
LoRAs also need to be used with another model:
```py
from diffusers import AutoPipelineForText2Image
import torch
pipeline = AutoPipelineForText2Image.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16).to("cuda")
```
Then use the [`~loaders.LoraLoaderMixin.load_lora_weights`] method to load the [ostris/super-cereal-sdxl-lora](https://huggingface.co/ostris/super-cereal-sdxl-lora) weights and specify the weights filename from the repository:
```py
pipeline.load_lora_weights("ostris/super-cereal-sdxl-lora", weight_name="cereal_box_sdxl_v1.safetensors")
prompt = "bears, pizza bites"
image = pipeline(prompt).images[0]
image
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/load_lora.png" />
</div>
The [`~loaders.LoraLoaderMixin.load_lora_weights`] method loads LoRA weights into both the UNet and text encoder. It is the preferred way for loading LoRAs because it can handle cases where:
- the LoRA weights don't have separate identifiers for the UNet and text encoder
- the LoRA weights have separate identifiers for the UNet and text encoder
But if you only need to load LoRA weights into the UNet, then you can use the [`~loaders.UNet2DConditionLoadersMixin.load_attn_procs`] method. Let's load the [jbilcke-hf/sdxl-cinematic-1](https://huggingface.co/jbilcke-hf/sdxl-cinematic-1) LoRA:
```py
from diffusers import AutoPipelineForText2Image
import torch
pipeline = AutoPipelineForText2Image.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16).to("cuda")
pipeline.unet.load_attn_procs("jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors")
# use cnmt in the prompt to trigger the LoRA
prompt = "A cute cnmt eating a slice of pizza, stunning color scheme, masterpiece, illustration"
image = pipeline(prompt).images[0]
image
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/load_attn_proc.png" />
</div>
<Tip>
For both [`~loaders.LoraLoaderMixin.load_lora_weights`] and [`~loaders.UNet2DConditionLoadersMixin.load_attn_procs`], you can pass the `cross_attention_kwargs={"scale": 0.5}` parameter to adjust how much of the LoRA weights to use. A value of `0` is the same as only using the base model weights, and a value of `1` is equivalent to using the fully finetuned LoRA.
</Tip>
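For example, with a LoRA loaded through either method, you can pass the scale at call time. A minimal sketch, reusing the pipeline and the `cnmt` trigger word from the example above:
```py
# apply only 50% of the LoRA weights at inference time
prompt = "A cute cnmt eating a slice of pizza, stunning color scheme, masterpiece, illustration"
image = pipeline(prompt, cross_attention_kwargs={"scale": 0.5}).images[0]
image
```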
To unload the LoRA weights, use the [`~loaders.LoraLoaderMixin.unload_lora_weights`] method to discard the LoRA weights and restore the model to its original weights:
```py
pipeline.unload_lora_weights()
```
### Kohya and TheLastBen
Other popular LoRA trainers from the community include those by [Kohya](https://github.com/kohya-ss/sd-scripts/) and [TheLastBen](https://github.com/TheLastBen/fast-stable-diffusion). These trainers create different LoRA checkpoints than those trained by 🤗 Diffusers, but they can still be loaded in the same way.
<hfoptions id="other-trainers">
<hfoption id="Kohya">
To load a Kohya LoRA, let's download the [Blueprintify SD XL 1.0](https://civitai.com/models/150986/blueprintify-sd-xl-10) checkpoint from [Civitai](https://civitai.com/) as an example:
```sh
!wget https://civitai.com/api/download/models/168776 -O blueprintify-sd-xl-10.safetensors
```
Load the LoRA checkpoint with the [`~loaders.LoraLoaderMixin.load_lora_weights`] method, and specify the filename in the `weight_name` parameter:
```py
from diffusers import AutoPipelineForText2Image
import torch
pipeline = AutoPipelineForText2Image.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16).to("cuda")
pipeline.load_lora_weights("path/to/weights", weight_name="blueprintify-sd-xl-10.safetensors")
```
Generate an image:
```py
# use bl3uprint in the prompt to trigger the LoRA
prompt = "bl3uprint, a highly detailed blueprint of the eiffel tower, explaining how to build all parts, many txt, blueprint grid backdrop"
image = pipeline(prompt).images[0]
image
```
<Tip warning={true}>
Some limitations of using Kohya LoRAs with 🤗 Diffusers include:
- Images may not look like those generated by UIs - like ComfyUI - for multiple reasons, which are explained [here](https://github.com/huggingface/diffusers/pull/4287/#issuecomment-1655110736).
- [LyCORIS checkpoints](https://github.com/KohakuBlueleaf/LyCORIS) aren't fully supported. The [`~loaders.LoraLoaderMixin.load_lora_weights`] method loads LyCORIS checkpoints with LoRA and LoCon modules, but Hada and LoKR are not supported.
</Tip>
</hfoption>
<hfoption id="TheLastBen">
Loading a checkpoint from TheLastBen is very similar. For example, to load the [TheLastBen/William_Eggleston_Style_SDXL](https://huggingface.co/TheLastBen/William_Eggleston_Style_SDXL) checkpoint:
```py
from diffusers import AutoPipelineForText2Image
import torch
pipeline = AutoPipelineForText2Image.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16).to("cuda")
pipeline.load_lora_weights("TheLastBen/William_Eggleston_Style_SDXL", weight_name="wegg.safetensors")
# use by william eggleston in the prompt to trigger the LoRA
prompt = "a house by william eggleston, sunrays, beautiful, sunlight, sunrays, beautiful"
image = pipeline(prompt=prompt).images[0]
image
```
</hfoption>
</hfoptions>
## IP-Adapter
[IP-Adapter](https://ip-adapter.github.io/) is a lightweight adapter that enables image prompting for any diffusion model. This adapter works by decoupling the cross-attention layers of the image and text features. All the other model components are frozen and only the embedded image features in the UNet are trained. As a result, IP-Adapter files are typically only ~100MBs.
You can learn more about how to use IP-Adapter for different tasks and specific use cases in the [IP-Adapter](../using-diffusers/ip_adapter) guide.
> [!TIP]
> Diffusers currently only supports IP-Adapter for some of the most popular pipelines. Feel free to open a feature request if you have a cool use case and want to integrate IP-Adapter with an unsupported pipeline!
> Official IP-Adapter checkpoints are available from [h94/IP-Adapter](https://huggingface.co/h94/IP-Adapter).
To start, load a Stable Diffusion checkpoint.
```py
from diffusers import AutoPipelineForText2Image
import torch
from diffusers.utils import load_image
pipeline = AutoPipelineForText2Image.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16).to("cuda")
```
Then load the IP-Adapter weights and add it to the pipeline with the [`~loaders.IPAdapterMixin.load_ip_adapter`] method.
```py
pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")
```
Once loaded, you can use the pipeline with an image and text prompt to guide the image generation process.
```py
image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/load_neg_embed.png")
generator = torch.Generator(device="cpu").manual_seed(33)
images = pipeline(
prompt='best quality, high quality, wearing sunglasses',
ip_adapter_image=image,
negative_prompt="monochrome, lowres, bad anatomy, worst quality, low quality",
num_inference_steps=50,
generator=generator,
).images[0]
images
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ip-bear.png" />
</div>
### IP-Adapter Plus
IP-Adapter relies on an image encoder to generate image features. If the IP-Adapter repository contains an `image_encoder` subfolder, the image encoder is automatically loaded and registered to the pipeline. Otherwise, you'll need to explicitly load the image encoder with a [`~transformers.CLIPVisionModelWithProjection`] model and pass it to the pipeline.
This is the case for *IP-Adapter Plus* checkpoints which use the ViT-H image encoder.
```py
from transformers import CLIPVisionModelWithProjection
image_encoder = CLIPVisionModelWithProjection.from_pretrained(
"h94/IP-Adapter",
subfolder="models/image_encoder",
torch_dtype=torch.float16
)
pipeline = AutoPipelineForText2Image.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0",
image_encoder=image_encoder,
torch_dtype=torch.float16
).to("cuda")
pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter-plus_sdxl_vit-h.safetensors")
```
| diffusers/docs/source/en/using-diffusers/loading_adapters.md/0 | {
"file_path": "diffusers/docs/source/en/using-diffusers/loading_adapters.md",
"repo_id": "diffusers",
"token_count": 4691
} | 102 |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Textual inversion
[[open-in-colab]]
The [`StableDiffusionPipeline`] supports textual inversion, a technique that enables a model like Stable Diffusion to learn a new concept from just a few sample images. This gives you more control over the generated images and allows you to tailor the model towards specific concepts. You can get started quickly with a collection of community created concepts in the [Stable Diffusion Conceptualizer](https://huggingface.co/spaces/sd-concepts-library/stable-diffusion-conceptualizer).
This guide will show you how to run inference with textual inversion using a pre-learned concept from the Stable Diffusion Conceptualizer. If you're interested in teaching a model new concepts with textual inversion, take a look at the [Textual Inversion](../training/text_inversion) training guide.
Import the necessary libraries:
```py
import torch
from diffusers import StableDiffusionPipeline
from diffusers.utils import make_image_grid
```
## Stable Diffusion 1 and 2
Pick a Stable Diffusion checkpoint and a pre-learned concept from the [Stable Diffusion Conceptualizer](https://huggingface.co/spaces/sd-concepts-library/stable-diffusion-conceptualizer):
```py
pretrained_model_name_or_path = "runwayml/stable-diffusion-v1-5"
repo_id_embeds = "sd-concepts-library/cat-toy"
```
Now you can load a pipeline, and pass the pre-learned concept to it:
```py
pipeline = StableDiffusionPipeline.from_pretrained(
pretrained_model_name_or_path, torch_dtype=torch.float16, use_safetensors=True
).to("cuda")
pipeline.load_textual_inversion(repo_id_embeds)
```
Create a prompt with the pre-learned concept by using the special placeholder token `<cat-toy>`, and choose the number of samples and rows of images you'd like to generate:
```py
prompt = "a grafitti in a favela wall with a <cat-toy> on it"
num_samples_per_row = 2
num_rows = 2
```
Then run the pipeline (feel free to adjust parameters like `num_inference_steps` and `guidance_scale` to see how they affect image quality), collect the generated images, and visualize them in a grid with the `make_image_grid` helper function you imported at the beginning:
```py
all_images = []
for _ in range(num_rows):
images = pipeline(prompt, num_images_per_prompt=num_samples_per_row, num_inference_steps=50, guidance_scale=7.5).images
all_images.extend(images)
grid = make_image_grid(all_images, num_rows, num_samples_per_row)
grid
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/textual_inversion_inference.png">
</div>
## Stable Diffusion XL
Stable Diffusion XL (SDXL) can also use textual inversion vectors for inference. In contrast to Stable Diffusion 1 and 2, SDXL has two text encoders so you'll need two textual inversion embeddings - one for each text encoder model.
Let's download the SDXL textual inversion embeddings and have a closer look at their structure:
```py
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file
file = hf_hub_download("dn118/unaestheticXL", filename="unaestheticXLv31.safetensors")
state_dict = load_file(file)
state_dict
```
```
{'clip_g': tensor([[ 0.0077, -0.0112, 0.0065, ..., 0.0195, 0.0159, 0.0275],
...,
[-0.0170, 0.0213, 0.0143, ..., -0.0302, -0.0240, -0.0362]],
'clip_l': tensor([[ 0.0023, 0.0192, 0.0213, ..., -0.0385, 0.0048, -0.0011],
...,
[ 0.0475, -0.0508, -0.0145, ..., 0.0070, -0.0089, -0.0163]],
```
There are two tensors, `"clip_g"` and `"clip_l"`.
`"clip_g"` corresponds to the bigger text encoder in SDXL, `pipe.text_encoder_2`, and `"clip_l"` corresponds to `pipe.text_encoder`.
Now you can load each tensor separately by passing them along with the correct text encoder and tokenizer
to [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`]:
```py
from diffusers import AutoPipelineForText2Image
import torch
pipe = AutoPipelineForText2Image.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", variant="fp16", torch_dtype=torch.float16)
pipe.to("cuda")
pipe.load_textual_inversion(state_dict["clip_g"], token="unaestheticXLv31", text_encoder=pipe.text_encoder_2, tokenizer=pipe.tokenizer_2)
pipe.load_textual_inversion(state_dict["clip_l"], token="unaestheticXLv31", text_encoder=pipe.text_encoder, tokenizer=pipe.tokenizer)
# the embedding should be used as a negative embedding, so we pass it as a negative prompt
generator = torch.Generator().manual_seed(33)
image = pipe("a woman standing in front of a mountain", negative_prompt="unaestheticXLv31", generator=generator).images[0]
image
```
| diffusers/docs/source/en/using-diffusers/textual_inversion_inference.md/0 | {
"file_path": "diffusers/docs/source/en/using-diffusers/textual_inversion_inference.md",
"repo_id": "diffusers",
"token_count": 1716
} | 103 |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Installation
Install 🤗 Diffusers for whichever deep learning library you're working with.
🤗 Diffusers is tested on Python 3.8+, PyTorch 1.7.0+, and Flax. Follow the installation instructions below for the deep learning library you are using:
- [PyTorch installation instructions](https://pytorch.org/get-started/locally/)
- [Flax installation instructions](https://flax.readthedocs.io/en/latest/)
## Install with pip
You should install 🤗 Diffusers in a [virtual environment](https://docs.python.org/3/library/venv.html).
If you're unfamiliar with Python virtual environments, take a look at this [guide to installing with pip and virtual environments](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/).
A virtual environment makes it easier to manage different projects and avoid compatibility issues between dependencies.
Start by creating a virtual environment in your project directory:
```bash
python -m venv .env
```
Activate the virtual environment:
```bash
source .env/bin/activate
```
Now you're ready to install 🤗 Diffusers with the following command:
**For PyTorch**
```bash
pip install diffusers["torch"]
```
**For Flax**
```bash
pip install diffusers["flax"]
```
## Install from source
Before installing `diffusers` from source, make sure you have `torch` and `accelerate` installed.
For `torch` installation, refer to the [torch docs](https://pytorch.org/get-started/locally/#start-locally).
To install `accelerate`:
```bash
pip install accelerate
```
Then install 🤗 Diffusers from source with the following command:
```bash
pip install git+https://github.com/huggingface/diffusers
```
This command installs the bleeding-edge `main` version rather than the latest `stable` version.
The `main` version is useful for staying up to date with the latest developments.
For instance, if a bug has been fixed since the last official release but a new release hasn't been rolled out yet.
However, this means the `main` version may not always be stable.
We strive to keep the `main` version operational, and most issues are usually resolved within a few hours or a day.
If you run into a problem, please open an [Issue](https://github.com/huggingface/transformers/issues) so we can fix it even sooner!
## Editable install
You will need an editable install if you'd like to:
* Use the `main` version of the source code.
* Contribute to 🤗 Diffusers (needed to test changes in the code).
Clone the repository and install 🤗 Diffusers with the following commands:
```bash
git clone https://github.com/huggingface/diffusers.git
cd diffusers
```
**For PyTorch**
```
pip install -e ".[torch]"
```
**For Flax**
```
pip install -e ".[flax]"
```
These commands link the folder you cloned the repository to with your Python library paths.
Python will now look inside the folder you cloned to, in addition to the normal library paths.
For example, if your Python packages are typically installed in `~/anaconda3/envs/main/lib/python3.8/site-packages/`, Python will also search the cloned folder, `~/diffusers/`.
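One quick way to verify this (not part of the original guide) is to print where Python imports the package from; with an editable install, the path should point inside the cloned `~/diffusers/` folder:
```py
# sanity check: this should print a path inside the cloned repository,
# e.g. ~/diffusers/src/diffusers/__init__.py
import diffusers

print(diffusers.__file__)
```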
<Tip warning={true}>
You must keep the `diffusers` folder if you want to keep using the library.
</Tip>
Now you can easily update your clone to the latest version of 🤗 Diffusers with the following command:
```bash
cd ~/diffusers/
git pull
```
Your Python environment will find the `main` version of 🤗 Diffusers on the next run.
## Notice on telemetry logging
Our library gathers telemetry information during `from_pretrained()` requests.
The data gathered includes the version of 🤗 Diffusers and PyTorch/Flax, the requested model or pipeline class, and the path to a pretrained checkpoint if it is hosted on the Hub.
This usage data helps us debug issues and prioritize new features.
Telemetry is only sent when loading models and pipelines from the Hugging Face Hub, and it is not collected during local usage.
We understand that not everyone wants to share additional information, and we respect your privacy, so you can disable telemetry collection by setting the `DISABLE_TELEMETRY` environment variable from your terminal:
On Linux/MacOS:
```bash
export DISABLE_TELEMETRY=YES
```
On Windows:
```bash
set DISABLE_TELEMETRY=YES
``` | diffusers/docs/source/ko/installation.md/0 | {
"file_path": "diffusers/docs/source/ko/installation.md",
"repo_id": "diffusers",
"token_count": 3687
} | 104 |
<!--Copyright 2024 Custom Diffusion authors The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Custom Diffusion training example
[Custom Diffusion](https://arxiv.org/abs/2212.04488) is a method to customize text-to-image models like Stable Diffusion given just a few (4~5) images of a subject.
The `train_custom_diffusion.py` script shows how to implement the training procedure and adapt it for Stable Diffusion.
This training example was contributed by [Nupur Kumari](https://nupurkmr9.github.io/) (one of the authors of Custom Diffusion).
## Running locally with PyTorch
### Installing the dependencies
Before running the script, make sure to install the library's training dependencies:
**Important**
To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the installation up to date, as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:
```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install -e .
```
Then cd into the [example folder](https://github.com/huggingface/diffusers/tree/main/examples/custom_diffusion):
```
cd examples/custom_diffusion
```
Now run:
```bash
pip install -r requirements.txt
pip install clip-retrieval
```
And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment:
```bash
accelerate config
```
Or, for a default accelerate configuration without answering questions about your environment:
```bash
accelerate config default
```
Or if your environment doesn't support an interactive shell (e.g., a Jupyter notebook):
```python
from accelerate.utils import write_basic_config
write_basic_config()
```
### Cat example 😺
Now let's get the dataset. Download the dataset from [here](https://www.cs.cmu.edu/~custom-diffusion/assets/data.zip) and unzip it. To use your own dataset, take a look at the [Create a dataset for training](create_dataset) guide.
We also collect 200 real images using `clip-retrieval` and combine them with the target images in the training dataset as a regularization. This prevents overfitting to the given target images. The following flags enable `prior_preservation` and `real_prior` regularization with `prior_loss_weight=1.`.
The `class_prompt` should be the category name, the same as the target images. The collected real images have text captions similar to the `class_prompt`. The retrieved images are saved in `class_data_dir`. You can disable `real_prior` to use generated images as regularization instead. To collect the real images, run this command first before training.
```bash
pip install clip-retrieval
python retrieve.py --class_prompt cat --class_data_dir real_reg/samples_cat --num_class_images 200
```
**___Note: Change the `resolution` to 768 if you are using the [stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) 768x768 model.___**
The script creates and saves model checkpoints and a `pytorch_custom_diffusion_weights.bin` file in your repository.
```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export OUTPUT_DIR="path-to-save-model"
export INSTANCE_DIR="./data/cat"
accelerate launch train_custom_diffusion.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--instance_data_dir=$INSTANCE_DIR \
--output_dir=$OUTPUT_DIR \
--class_data_dir=./real_reg/samples_cat/ \
--with_prior_preservation --real_prior --prior_loss_weight=1.0 \
--class_prompt="cat" --num_class_images=200 \
--instance_prompt="photo of a <new1> cat" \
--resolution=512 \
--train_batch_size=2 \
--learning_rate=1e-5 \
--lr_warmup_steps=0 \
--max_train_steps=250 \
--scale_lr --hflip \
--modifier_token "<new1>" \
--push_to_hub
```
**Use `--enable_xformers_memory_efficient_attention` for faster training with lower VRAM requirements (16GB per GPU). Follow [this guide](https://github.com/facebookresearch/xformers) for installation instructions.**
To track your experiments using Weights and Biases (`wandb`) and to save intermediate results (which we strongly recommend), follow these steps:
* Install `wandb`: `pip install wandb`.
* Log in: `wandb login`.
* Then specify a `validation_prompt` and set `report_to` to `wandb` when launching training. You can also configure the following related arguments:
* `num_validation_images`
* `validation_steps`
```bash
accelerate launch train_custom_diffusion.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--instance_data_dir=$INSTANCE_DIR \
--output_dir=$OUTPUT_DIR \
--class_data_dir=./real_reg/samples_cat/ \
--with_prior_preservation --real_prior --prior_loss_weight=1.0 \
--class_prompt="cat" --num_class_images=200 \
--instance_prompt="photo of a <new1> cat" \
--resolution=512 \
--train_batch_size=2 \
--learning_rate=1e-5 \
--lr_warmup_steps=0 \
--max_train_steps=250 \
--scale_lr --hflip \
--modifier_token "<new1>" \
--validation_prompt="<new1> cat sitting in a bucket" \
--report_to="wandb" \
--push_to_hub
```
Here is an example [Weights and Biases page](https://wandb.ai/sayakpaul/custom-diffusion/runs/26ghrcau) where you can check out the intermediate results along with other training details.
If you specify `--push_to_hub`, the learned parameters will be pushed to a repository on the Hugging Face Hub. Here is an [example repository](https://huggingface.co/sayakpaul/custom-diffusion-cat).
### Training on multiple concepts 🐱🪵
Similar to [this](https://github.com/ShivamShrirao/diffusers/blob/main/examples/dreambooth/train_dreambooth.py), provide a [json](https://github.com/adobe-research/custom-diffusion/blob/main/assets/concept_list.json) file with the information about each concept.
To collect the real images, run this command for each concept in the json file.
```bash
pip install clip-retrieval
python retrieve.py --class_prompt {} --class_data_dir {} --num_class_images 200
```
And then we're ready to start training!
```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export OUTPUT_DIR="path-to-save-model"
accelerate launch train_custom_diffusion.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--output_dir=$OUTPUT_DIR \
--concepts_list=./concept_list.json \
--with_prior_preservation --real_prior --prior_loss_weight=1.0 \
--resolution=512 \
--train_batch_size=2 \
--learning_rate=1e-5 \
--lr_warmup_steps=0 \
--max_train_steps=500 \
--num_class_images=200 \
--scale_lr --hflip \
--modifier_token "<new1>+<new2>" \
--push_to_hub
```
Here is an example [Weights and Biases page](https://wandb.ai/sayakpaul/custom-diffusion/runs/3990tzkg) where you can check out the intermediate results along with other training details.
### Training on human faces
For fine-tuning on human faces, we found the following configuration to work better: `learning_rate=5e-6`, `max_train_steps=1000 to 2000`, and `freeze_model=crossattn`, with at least 15-20 images.
To collect the real images, use this command first before training.
```bash
pip install clip-retrieval
python retrieve.py --class_prompt person --class_data_dir real_reg/samples_person --num_class_images 200
```
Then start training!
```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export OUTPUT_DIR="path-to-save-model"
export INSTANCE_DIR="path-to-images"
accelerate launch train_custom_diffusion.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--instance_data_dir=$INSTANCE_DIR \
--output_dir=$OUTPUT_DIR \
--class_data_dir=./real_reg/samples_person/ \
--with_prior_preservation --real_prior --prior_loss_weight=1.0 \
--class_prompt="person" --num_class_images=200 \
--instance_prompt="photo of a <new1> person" \
--resolution=512 \
--train_batch_size=2 \
--learning_rate=5e-6 \
--lr_warmup_steps=0 \
--max_train_steps=1000 \
--scale_lr --hflip --noaug \
--freeze_model crossattn \
--modifier_token "<new1>" \
--enable_xformers_memory_efficient_attention \
--push_to_hub
```
## Inference
Once you have trained a model using the above command, you can run inference using the command below. Make sure to include the `modifier token` (e.g. \<new1\> in the above example) in your prompt.
```python
import torch
from diffusers import DiffusionPipeline
pipe = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16).to("cuda")
pipe.unet.load_attn_procs("path-to-save-model", weight_name="pytorch_custom_diffusion_weights.bin")
pipe.load_textual_inversion("path-to-save-model", weight_name="<new1>.bin")
image = pipe(
"<new1> cat sitting in a bucket",
num_inference_steps=100,
guidance_scale=6.0,
eta=1.0,
).images[0]
image.save("cat.png")
```
It's possible to directly load these parameters from a Hub repository:
```python
import torch
from huggingface_hub.repocard import RepoCard
from diffusers import DiffusionPipeline
model_id = "sayakpaul/custom-diffusion-cat"
card = RepoCard.load(model_id)
base_model_id = card.data.to_dict()["base_model"]
pipe = DiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16).to("cuda")
pipe.unet.load_attn_procs(model_id, weight_name="pytorch_custom_diffusion_weights.bin")
pipe.load_textual_inversion(model_id, weight_name="<new1>.bin")
image = pipe(
"<new1> cat sitting in a bucket",
num_inference_steps=100,
guidance_scale=6.0,
eta=1.0,
).images[0]
image.save("cat.png")
```
Here is an example of performing inference with multiple concepts:
```python
import torch
from huggingface_hub.repocard import RepoCard
from diffusers import DiffusionPipeline
model_id = "sayakpaul/custom-diffusion-cat-wooden-pot"
card = RepoCard.load(model_id)
base_model_id = card.data.to_dict()["base_model"]
pipe = DiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16).to("cuda")
pipe.unet.load_attn_procs(model_id, weight_name="pytorch_custom_diffusion_weights.bin")
pipe.load_textual_inversion(model_id, weight_name="<new1>.bin")
pipe.load_textual_inversion(model_id, weight_name="<new2>.bin")
image = pipe(
"the <new1> cat sculpture in the style of a <new2> wooden pot",
num_inference_steps=100,
guidance_scale=6.0,
eta=1.0,
).images[0]
image.save("multi-subject.png")
```
Here, 'cat' and 'wooden pot' refer to the multiple concepts.
### Inference from a training checkpoint
You can also perform inference from one of the complete checkpoints saved during the training process, if you used the `--checkpointing_steps` argument.
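A minimal sketch of what that can look like, assuming the checkpoint directory already contains the exported `pytorch_custom_diffusion_weights.bin` and `<new1>.bin` files (the exact layout depends on the script version; with a raw `accelerate` state you may need to export these weights first, so adjust the paths to what your run actually saved):
```python
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16).to("cuda")

# hypothetical checkpoint directory produced with --checkpointing_steps
ckpt_dir = "path-to-save-model/checkpoint-250"
pipe.unet.load_attn_procs(ckpt_dir, weight_name="pytorch_custom_diffusion_weights.bin")
pipe.load_textual_inversion(ckpt_dir, weight_name="<new1>.bin")

image = pipe(
    "<new1> cat sitting in a bucket",
    num_inference_steps=100,
    guidance_scale=6.0,
    eta=1.0,
).images[0]
image.save("cat-from-checkpoint.png")
```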
## Set grads to None
To save even more memory, pass the `--set_grads_to_none` argument to the script. This sets the gradients to None instead of zero. Note that this changes certain behaviors, so if you run into any issues, remove this argument.
More info: https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html
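Under the hood this corresponds to PyTorch's `Optimizer.zero_grad(set_to_none=True)`. A small standalone illustration (not taken from the training script):
```python
import torch

model = torch.nn.Linear(4, 4)
optimizer = torch.optim.AdamW(model.parameters())

loss = model(torch.randn(2, 4)).sum()
loss.backward()

# Instead of filling the gradient tensors with zeros, drop them entirely.
# The next backward pass recreates them, which saves memory and a fill kernel,
# but any code that reads `param.grad` must now handle `None`.
optimizer.zero_grad(set_to_none=True)
print(model.weight.grad)  # None
```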
## Experimental results
You can refer to [our webpage](https://www.cs.cmu.edu/~custom-diffusion/) for details on all our experiments. | diffusers/docs/source/ko/training/custom_diffusion.md/0 | {
"file_path": "diffusers/docs/source/ko/training/custom_diffusion.md",
"repo_id": "diffusers",
"token_count": 7052
} | 105 |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Load community pipelines
[[open-in-colab]]
Community pipelines are any [`DiffusionPipeline`] class that differ from the original implementation as specified in their paper (for example, the [`StableDiffusionControlNetPipeline`] corresponds to the ["Text-to-Image Generation with ControlNet Conditioning"](https://arxiv.org/abs/2302.05543) paper). They provide additional functionality or extend the original implementation of a pipeline.
There are many cool community pipelines like [Speech to Image](https://github.com/huggingface/diffusers/tree/main/examples/community#speech-to-image) or [Composable Stable Diffusion](https://github.com/huggingface/diffusers/tree/main/examples/community#composable-stable-diffusion), and you can find all the official community pipelines [here](https://github.com/huggingface/diffusers/tree/main/examples/community).
To load a community pipeline from the Hub, pass the repository id of the community pipeline along with the repository id of the model you want to load the pipeline weights and components from. For example, the example below loads a dummy pipeline from `hf-internal-testing/diffusers-dummy-pipeline` and the pipeline weights and components from `google/ddpm-cifar10-32`:
<Tip warning={true}>
🔒 Loading a community pipeline from the Hugging Face Hub means you are trusting that the code is safe. Make sure to inspect the code online before loading and running it automatically!
</Tip>
```py
from diffusers import DiffusionPipeline
pipeline = DiffusionPipeline.from_pretrained(
"google/ddpm-cifar10-32", custom_pipeline="hf-internal-testing/diffusers-dummy-pipeline"
)
```
Loading an official community pipeline is similar, but you can additionally load weights from an official repository id and directly specify the components the pipeline should use. The example below loads the community [CLIP Guided Stable Diffusion](https://github.com/huggingface/diffusers/tree/main/examples/community#clip-guided-stable-diffusion) pipeline, and you can see it directly sets the `clip_model` and `feature_extractor` components it uses:
```py
from diffusers import DiffusionPipeline
from transformers import CLIPImageProcessor, CLIPModel
clip_model_id = "laion/CLIP-ViT-B-32-laion2B-s34B-b79K"
feature_extractor = CLIPImageProcessor.from_pretrained(clip_model_id)
clip_model = CLIPModel.from_pretrained(clip_model_id)
pipeline = DiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5",
custom_pipeline="clip_guided_stable_diffusion",
clip_model=clip_model,
feature_extractor=feature_extractor,
)
```
For more information about community pipelines, take a look at the [Community pipelines](https://github.com/huggingface/diffusers/blob/main/docs/source/en/using-diffusers/custom_pipeline_examples) guide. If you're interested in contributing a community pipeline, check out the [How to contribute a community pipeline](https://github.com/huggingface/diffusers/blob/main/docs/source/en/using-diffusers/contribute_pipeline) guide! | diffusers/docs/source/ko/using-diffusers/custom_pipeline_overview.md/0 | {
"file_path": "diffusers/docs/source/ko/using-diffusers/custom_pipeline_overview.md",
"repo_id": "diffusers",
"token_count": 2382
} | 106 |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Understand pipelines, models and schedulers
[[open-in-colab]]
🧨 Diffusers is designed to be a user-friendly and flexible toolbox for building diffusion systems tailored to your use-case. At the core of the toolbox are models and schedulers. While the [`DiffusionPipeline`] bundles these components together for convenience, you can also unbundle the pipeline and use the models and schedulers separately to build new diffusion systems.
In this tutorial, you'll learn how to use models and schedulers to assemble a diffusion system for inference, starting with a basic pipeline and then progressing to the Stable Diffusion pipeline.
## Deconstruct a basic pipeline
A pipeline is a quick and easy way to run a model for inference, requiring no more than four lines of code to generate an image:
```py
>>> from diffusers import DDPMPipeline
>>> ddpm = DDPMPipeline.from_pretrained("google/ddpm-cat-256").to("cuda")
>>> image = ddpm(num_inference_steps=25).images[0]
>>> image
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ddpm-cat.png" alt="Image of cat created from DDPMPipeline"/>
</div>
That was super easy, but how did the pipeline do that? Let's break the pipeline down and take a look at what's happening under the hood.
In the example above, the pipeline contains a [`UNet2DModel`] model and a [`DDPMScheduler`]. The pipeline denoises an image by taking random noise the size of the desired output and passing it through the model several times. At each timestep, the model predicts the *noise residual* and the scheduler uses it to predict a less noisy image. The pipeline repeats this process until it reaches the end of the specified number of inference steps.
To recreate the pipeline with the model and scheduler separately, let's write our own denoising process.
1. Load the model and scheduler:
```py
>>> from diffusers import DDPMScheduler, UNet2DModel
>>> scheduler = DDPMScheduler.from_pretrained("google/ddpm-cat-256")
>>> model = UNet2DModel.from_pretrained("google/ddpm-cat-256").to("cuda")
```
2. Set the number of timesteps to run the denoising process for:
```py
>>> scheduler.set_timesteps(50)
```
3. Setting the scheduler timesteps creates a tensor with evenly spaced elements in it, 50 in this example. Each element corresponds to a timestep at which the model denoises an image. When you create the denoising loop later, you'll iterate over this tensor to denoise an image:
```py
>>> scheduler.timesteps
tensor([980, 960, 940, 920, 900, 880, 860, 840, 820, 800, 780, 760, 740, 720,
700, 680, 660, 640, 620, 600, 580, 560, 540, 520, 500, 480, 460, 440,
420, 400, 380, 360, 340, 320, 300, 280, 260, 240, 220, 200, 180, 160,
140, 120, 100, 80, 60, 40, 20, 0])
```
4. Create some random noise with the same shape as the desired output:
```py
>>> import torch
>>> sample_size = model.config.sample_size
>>> noise = torch.randn((1, 3, sample_size, sample_size), device="cuda")
```
5. Now write a loop to iterate over the timesteps. At each timestep, the model does a [`UNet2DModel.forward`] pass and returns the noisy residual. The scheduler's [`~DDPMScheduler.step`] method takes the noisy residual, timestep, and input, and it predicts the image at the previous timestep. This output becomes the next input to the model in the denoising loop, and it repeats until it reaches the end of the `timesteps` array.
```py
>>> input = noise
>>> for t in scheduler.timesteps:
... with torch.no_grad():
... noisy_residual = model(input, t).sample
... previous_noisy_sample = scheduler.step(noisy_residual, t, input).prev_sample
... input = previous_noisy_sample
```
This is the entire denoising process, and you can use this same pattern to write any diffusion system.
6. The last step is to convert the denoised output into an image:
```py
>>> from PIL import Image
>>> import numpy as np
>>> image = (input / 2 + 0.5).clamp(0, 1)
>>> image = image.cpu().permute(0, 2, 3, 1).numpy()[0]
>>> image = Image.fromarray((image * 255).round().astype("uint8"))
>>> image
```
In the next section, you'll put your skills to the test and break down the more complex Stable Diffusion pipeline. The steps are more or less the same: initialize the necessary components and set the number of timesteps to create a `timestep` array. The `timestep` array is used in the denoising loop, and for each element in this array, the model predicts a less noisy image. The denoising loop iterates over the `timestep`s, and at each timestep, it outputs a noise residual that the scheduler uses to predict a less noisy image at the previous timestep. This process is repeated until you reach the end of the `timestep` array.
Let's try it out!
## Deconstruct the Stable Diffusion pipeline
Stable Diffusion is a text-to-image *latent diffusion* model. It is called a latent diffusion model because it works with a lower-dimensional representation of the image instead of the actual pixel space, which makes it more memory-efficient. The encoder compresses the image into a smaller representation, and a decoder converts the compressed representation back into an image. For text-to-image models, you'll need a tokenizer and an encoder to generate text embeddings. From the previous example, you already know you need a UNet model and a scheduler.
As you can see, this is already more complex than the DDPM pipeline, which only contains a UNet model. The Stable Diffusion model has three separate pretrained models.
<Tip>
💡 Read the [How does Stable Diffusion work?](https://huggingface.co/blog/stable_diffusion#how-does-stable-diffusion-work) blog post for more details about how the VAE, UNet, and text encoder models work.
</Tip>
Now that you know what you need for the Stable Diffusion pipeline, load all of these components with the [`~ModelMixin.from_pretrained`] method. You can find them in the pretrained [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) checkpoint, and each component is stored in a separate subfolder:
```py
>>> from PIL import Image
>>> import torch
>>> from transformers import CLIPTextModel, CLIPTokenizer
>>> from diffusers import AutoencoderKL, UNet2DConditionModel, PNDMScheduler
>>> vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae")
>>> tokenizer = CLIPTokenizer.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="tokenizer")
>>> text_encoder = CLIPTextModel.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="text_encoder")
>>> unet = UNet2DConditionModel.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="unet")
```
Instead of the default [`PNDMScheduler`], exchange it for the [`UniPCMultistepScheduler`] to see how easy it is to plug a different scheduler in:
```py
>>> from diffusers import UniPCMultistepScheduler
>>> scheduler = UniPCMultistepScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")
```
To speed up inference, move the models to a GPU since, unlike the scheduler, they have trainable weights:
```py
>>> torch_device = "cuda"
>>> vae.to(torch_device)
>>> text_encoder.to(torch_device)
>>> unet.to(torch_device)
```
### Create text embeddings
The next step is to tokenize the text to generate embeddings. The text is used to condition the UNet model and steer the diffusion process towards something that resembles the input prompt.
<Tip>
💡 The `guidance_scale` parameter determines how much weight should be given to the prompt when generating an image.
</Tip>
Feel free to choose any prompt you like if you want to generate something else!
```py
>>> prompt = ["a photograph of an astronaut riding a horse"]
>>> height = 512 # default height of Stable Diffusion
>>> width = 512 # default width of Stable Diffusion
>>> num_inference_steps = 25 # Number of denoising steps
>>> guidance_scale = 7.5 # Scale for classifier-free guidance
>>> generator = torch.manual_seed(0) # Seed generator to create the initial latent noise
>>> batch_size = len(prompt)
```
Tokenize the text and generate the embeddings from the prompt:
```py
>>> text_input = tokenizer(
... prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt"
... )
>>> with torch.no_grad():
... text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0]
```
You'll also need to generate the *unconditional text embeddings*, which are the embeddings for the padding token. These need to have the same shape (`batch_size` and `seq_length`) as the conditional `text_embeddings`:
```py
>>> max_length = text_input.input_ids.shape[-1]
>>> uncond_input = tokenizer([""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt")
>>> uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
```
Let's concatenate the conditional and unconditional embeddings into a batch to avoid doing two forward passes:
```py
>>> text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
```
### Create random noise
Next, generate some initial random noise as the starting point for the diffusion process. This is the latent representation of the image, and it'll be gradually denoised. At this point, the `latent` image is smaller than the final image size, but that's okay because the model will transform it into the final 512x512 image dimensions later.
<Tip>
💡 The height and width are divided by 8 because the `vae` model has 3 down-sampling layers. You can check by running the following:
```py
2 ** (len(vae.config.block_out_channels) - 1) == 8
```
</Tip>
```py
>>> latents = torch.randn(
... (batch_size, unet.config.in_channels, height // 8, width // 8),
... generator=generator,
... device=torch_device,
... )
```
### Denoise the image
Start by scaling the input with the initial noise distribution, *sigma*, the noise scale value, which is required for improved schedulers like the [`UniPCMultistepScheduler`]:
```py
>>> latents = latents * scheduler.init_noise_sigma
```
The last step is to create the denoising loop that'll progressively transform the pure noise in `latents` to an image described by your prompt. Remember, the denoising loop needs to do three things:
1. Set the scheduler's timesteps to use during denoising.
2. Iterate over the timesteps.
3. At each timestep, call the UNet model to predict the noise residual and pass it to the scheduler to compute the previous noisy sample.
```py
>>> from tqdm.auto import tqdm
>>> scheduler.set_timesteps(num_inference_steps)
>>> for t in tqdm(scheduler.timesteps):
...     # expand the latents to avoid doing two forward passes when doing classifier-free guidance.
... latent_model_input = torch.cat([latents] * 2)
... latent_model_input = scheduler.scale_model_input(latent_model_input, timestep=t)
...     # predict the noise residual
... with torch.no_grad():
... noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
...     # perform guidance
... noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
... noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
...     # compute the previous noisy sample x_t -> x_t-1
... latents = scheduler.step(noise_pred, t, latents).prev_sample
```
### Decode the image
The last step is to use the `vae` to decode the latent representation into an image and get the decoded output with `sample`:
```py
# scale and decode the image latents with the vae
latents = 1 / 0.18215 * latents
with torch.no_grad():
image = vae.decode(latents).sample
```
Lastly, convert the image to a `PIL.Image` to see your generated image!
```py
>>> image = (image / 2 + 0.5).clamp(0, 1)
>>> image = image.detach().cpu().permute(0, 2, 3, 1).numpy()
>>> images = (image * 255).round().astype("uint8")
>>> pil_images = [Image.fromarray(image) for image in images]
>>> pil_images[0]
```
<div class="flex justify-center">
<img src="https://huggingface.co/blog/assets/98_stable_diffusion/stable_diffusion_k_lms.png"/>
</div>
## Next steps
From basic to complex pipelines, you've seen that all you really need to write your own diffusion system is a denoising loop. The loop should set the scheduler's timesteps, iterate over them, and alternate between calling the UNet model to predict the noise residual and passing it to the scheduler to compute the previous noisy sample.
This is really what 🧨 Diffusers is designed for: to make it intuitive and easy to write your own diffusion system using models and schedulers.
For your next steps, feel free to:
* Learn how to [build and contribute a pipeline](using-diffusers/#contribute_pipeline) to 🧨 Diffusers. We can't wait to see what you come up with!
* Explore [existing pipelines](./api/pipelines/overview) in the library, and see if you can deconstruct and build a pipeline from scratch using the models and schedulers separately.
| diffusers/docs/source/ko/using-diffusers/write_own_pipeline.md/0 | {
"file_path": "diffusers/docs/source/ko/using-diffusers/write_own_pipeline.md",
"repo_id": "diffusers",
"token_count": 9948
} | 107 |
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import copy
import logging
import math
import os
import shutil
from contextlib import nullcontext
from pathlib import Path
import torch
import torch.nn.functional as F
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
from datasets import load_dataset
from peft import LoraConfig
from peft.utils import get_peft_model_state_dict
from PIL import Image
from PIL.ImageOps import exif_transpose
from torch.utils.data import DataLoader, Dataset, default_collate
from torchvision import transforms
from transformers import (
CLIPTextModelWithProjection,
CLIPTokenizer,
)
import diffusers.optimization
from diffusers import AmusedPipeline, AmusedScheduler, EMAModel, UVit2DModel, VQModel
from diffusers.loaders import LoraLoaderMixin
from diffusers.utils import is_wandb_available
if is_wandb_available():
import wandb
logger = get_logger(__name__, log_level="INFO")
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--pretrained_model_name_or_path",
type=str,
default=None,
required=True,
help="Path to pretrained model or model identifier from huggingface.co/models.",
)
parser.add_argument(
"--revision",
type=str,
default=None,
required=False,
help="Revision of pretrained model identifier from huggingface.co/models.",
)
parser.add_argument(
"--variant",
type=str,
default=None,
help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16",
)
parser.add_argument(
"--instance_data_dataset",
type=str,
default=None,
required=False,
help="A Hugging Face dataset containing the training images",
)
parser.add_argument(
"--instance_data_dir",
type=str,
default=None,
required=False,
help="A folder containing the training data of instance images.",
)
parser.add_argument(
"--instance_data_image", type=str, default=None, required=False, help="A single training image"
)
parser.add_argument(
"--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
)
parser.add_argument(
"--dataloader_num_workers",
type=int,
default=0,
help=(
"Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
),
)
parser.add_argument(
"--allow_tf32",
action="store_true",
help=(
"Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
" https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
),
)
parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.")
parser.add_argument("--ema_decay", type=float, default=0.9999)
parser.add_argument("--ema_update_after_step", type=int, default=0)
parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
parser.add_argument(
"--output_dir",
type=str,
default="muse_training",
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
parser.add_argument(
"--logging_dir",
type=str,
default="logs",
help=(
"[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
" *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
),
)
parser.add_argument(
"--max_train_steps",
type=int,
default=None,
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
)
parser.add_argument(
"--checkpointing_steps",
type=int,
default=500,
help=(
"Save a checkpoint of the training state every X updates. Checkpoints can be used for resuming training via `--resume_from_checkpoint`. "
"In the case that the checkpoint is better than the final trained model, the checkpoint can also be used for inference."
"Using a checkpoint for inference requires separate loading of the original pipeline and the individual checkpointed model components."
"See https://huggingface.co/docs/diffusers/main/en/training/dreambooth#performing-inference-using-a-saved-checkpoint for step by step"
"instructions."
),
)
parser.add_argument(
"--logging_steps",
type=int,
default=50,
)
parser.add_argument(
"--checkpoints_total_limit",
type=int,
default=None,
help=(
"Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`."
" See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state"
" for more details"
),
)
parser.add_argument(
"--resume_from_checkpoint",
type=str,
default=None,
help=(
"Whether training should be resumed from a previous checkpoint. Use a path saved by"
' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
),
)
parser.add_argument(
"--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader."
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=0.0003,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument(
"--scale_lr",
action="store_true",
default=False,
help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
)
parser.add_argument(
"--lr_scheduler",
type=str,
default="constant",
help=(
'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
' "constant", "constant_with_warmup"]'
),
)
parser.add_argument(
"--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
)
parser.add_argument(
"--validation_steps",
type=int,
default=100,
help=(
"Run validation every X steps. Validation consists of running the prompt"
" `args.validation_prompt` multiple times: `args.num_validation_images`"
" and logging the images."
),
)
parser.add_argument(
"--mixed_precision",
type=str,
default=None,
choices=["no", "fp16", "bf16"],
help=(
"Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
" 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
" flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
),
)
parser.add_argument(
"--report_to",
type=str,
default="wandb",
help=(
'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
),
)
parser.add_argument("--validation_prompts", type=str, nargs="*")
parser.add_argument(
"--resolution",
type=int,
default=512,
help=(
"The resolution for input images, all the images in the train/validation dataset will be resized to this"
" resolution"
),
)
parser.add_argument("--split_vae_encode", type=int, required=False, default=None)
parser.add_argument("--min_masking_rate", type=float, default=0.0)
parser.add_argument("--cond_dropout_prob", type=float, default=0.0)
parser.add_argument("--max_grad_norm", default=None, type=float, help="Max gradient norm.", required=False)
parser.add_argument("--use_lora", action="store_true", help="Fine tune the model using LoRa")
parser.add_argument("--text_encoder_use_lora", action="store_true", help="Fine tune the model using LoRa")
parser.add_argument("--lora_r", default=16, type=int)
parser.add_argument("--lora_alpha", default=32, type=int)
parser.add_argument("--lora_target_modules", default=["to_q", "to_k", "to_v"], type=str, nargs="+")
parser.add_argument("--text_encoder_lora_r", default=16, type=int)
parser.add_argument("--text_encoder_lora_alpha", default=32, type=int)
parser.add_argument("--text_encoder_lora_target_modules", default=["to_q", "to_k", "to_v"], type=str, nargs="+")
parser.add_argument("--train_text_encoder", action="store_true")
parser.add_argument("--image_key", type=str, required=False)
parser.add_argument("--prompt_key", type=str, required=False)
parser.add_argument(
"--gradient_checkpointing",
action="store_true",
help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
)
parser.add_argument("--prompt_prefix", type=str, required=False, default=None)
args = parser.parse_args()
if args.report_to == "wandb":
if not is_wandb_available():
raise ImportError("Make sure to install wandb if you want to use it for logging during training.")
num_datasources = sum(
[x is not None for x in [args.instance_data_dir, args.instance_data_image, args.instance_data_dataset]]
)
if num_datasources != 1:
raise ValueError(
"provide one and only one of `--instance_data_dir`, `--instance_data_image`, or `--instance_data_dataset`"
)
if args.instance_data_dir is not None:
if not os.path.exists(args.instance_data_dir):
raise ValueError(f"Does not exist: `--args.instance_data_dir` {args.instance_data_dir}")
if args.instance_data_image is not None:
if not os.path.exists(args.instance_data_image):
raise ValueError(f"Does not exist: `--args.instance_data_image` {args.instance_data_image}")
if args.instance_data_dataset is not None and (args.image_key is None or args.prompt_key is None):
raise ValueError("`--instance_data_dataset` requires setting `--image_key` and `--prompt_key`")
return args
class InstanceDataRootDataset(Dataset):
def __init__(
self,
instance_data_root,
tokenizer,
size=512,
):
self.size = size
self.tokenizer = tokenizer
self.instance_images_path = list(Path(instance_data_root).iterdir())
def __len__(self):
return len(self.instance_images_path)
def __getitem__(self, index):
image_path = self.instance_images_path[index % len(self.instance_images_path)]
instance_image = Image.open(image_path)
rv = process_image(instance_image, self.size)
prompt = os.path.splitext(os.path.basename(image_path))[0]
rv["prompt_input_ids"] = tokenize_prompt(self.tokenizer, prompt)[0]
return rv
class InstanceDataImageDataset(Dataset):
def __init__(
self,
instance_data_image,
train_batch_size,
size=512,
):
self.value = process_image(Image.open(instance_data_image), size)
self.train_batch_size = train_batch_size
def __len__(self):
# Needed so a full batch of the data can be returned. Otherwise will return
# batches of size 1
return self.train_batch_size
def __getitem__(self, index):
return self.value
class HuggingFaceDataset(Dataset):
def __init__(
self,
hf_dataset,
tokenizer,
image_key,
prompt_key,
prompt_prefix=None,
size=512,
):
self.size = size
self.image_key = image_key
self.prompt_key = prompt_key
self.tokenizer = tokenizer
self.hf_dataset = hf_dataset
self.prompt_prefix = prompt_prefix
def __len__(self):
return len(self.hf_dataset)
def __getitem__(self, index):
item = self.hf_dataset[index]
rv = process_image(item[self.image_key], self.size)
prompt = item[self.prompt_key]
if self.prompt_prefix is not None:
prompt = self.prompt_prefix + prompt
rv["prompt_input_ids"] = tokenize_prompt(self.tokenizer, prompt)[0]
return rv
def process_image(image, size):
image = exif_transpose(image)
if not image.mode == "RGB":
image = image.convert("RGB")
orig_height = image.height
orig_width = image.width
image = transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR)(image)
c_top, c_left, _, _ = transforms.RandomCrop.get_params(image, output_size=(size, size))
image = transforms.functional.crop(image, c_top, c_left, size, size)
image = transforms.ToTensor()(image)
    # micro-conditioning inputs: original image size, crop coordinates, and a
    # fixed aesthetic score of 6.0
    micro_conds = torch.tensor(
        [orig_width, orig_height, c_top, c_left, 6.0],
    )
return {"image": image, "micro_conds": micro_conds}
def tokenize_prompt(tokenizer, prompt):
return tokenizer(
prompt,
truncation=True,
padding="max_length",
max_length=77,
return_tensors="pt",
).input_ids
def encode_prompt(text_encoder, input_ids):
    outputs = text_encoder(input_ids, return_dict=True, output_hidden_states=True)
    # per-token conditioning comes from the penultimate hidden layer of the text encoder
    encoder_hidden_states = outputs.hidden_states[-2]
    # the projected pooled output (`text_embeds`) is used as the global conditioning
    cond_embeds = outputs[0]
    return encoder_hidden_states, cond_embeds
def main(args):
if args.allow_tf32:
torch.backends.cuda.matmul.allow_tf32 = True
logging_dir = Path(args.output_dir, args.logging_dir)
accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
accelerator = Accelerator(
gradient_accumulation_steps=args.gradient_accumulation_steps,
mixed_precision=args.mixed_precision,
log_with=args.report_to,
project_config=accelerator_project_config,
)
if accelerator.is_main_process:
os.makedirs(args.output_dir, exist_ok=True)
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state, main_process_only=False)
if accelerator.is_main_process:
accelerator.init_trackers("amused", config=vars(copy.deepcopy(args)))
if args.seed is not None:
set_seed(args.seed)
# TODO - will have to fix loading if training text encoder
text_encoder = CLIPTextModelWithProjection.from_pretrained(
args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, variant=args.variant
)
tokenizer = CLIPTokenizer.from_pretrained(
args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision, variant=args.variant
)
vq_model = VQModel.from_pretrained(
args.pretrained_model_name_or_path, subfolder="vqvae", revision=args.revision, variant=args.variant
)
if args.train_text_encoder:
if args.text_encoder_use_lora:
lora_config = LoraConfig(
r=args.text_encoder_lora_r,
lora_alpha=args.text_encoder_lora_alpha,
target_modules=args.text_encoder_lora_target_modules,
)
text_encoder.add_adapter(lora_config)
text_encoder.train()
text_encoder.requires_grad_(True)
else:
text_encoder.eval()
text_encoder.requires_grad_(False)
vq_model.requires_grad_(False)
model = UVit2DModel.from_pretrained(
args.pretrained_model_name_or_path,
subfolder="transformer",
revision=args.revision,
variant=args.variant,
)
if args.use_lora:
lora_config = LoraConfig(
r=args.lora_r,
lora_alpha=args.lora_alpha,
target_modules=args.lora_target_modules,
)
model.add_adapter(lora_config)
model.train()
if args.gradient_checkpointing:
model.enable_gradient_checkpointing()
if args.train_text_encoder:
text_encoder.gradient_checkpointing_enable()
if args.use_ema:
ema = EMAModel(
model.parameters(),
decay=args.ema_decay,
update_after_step=args.ema_update_after_step,
model_cls=UVit2DModel,
model_config=model.config,
)
def save_model_hook(models, weights, output_dir):
if accelerator.is_main_process:
transformer_lora_layers_to_save = None
text_encoder_lora_layers_to_save = None
for model_ in models:
if isinstance(model_, type(accelerator.unwrap_model(model))):
if args.use_lora:
transformer_lora_layers_to_save = get_peft_model_state_dict(model_)
else:
model_.save_pretrained(os.path.join(output_dir, "transformer"))
elif isinstance(model_, type(accelerator.unwrap_model(text_encoder))):
if args.text_encoder_use_lora:
text_encoder_lora_layers_to_save = get_peft_model_state_dict(model_)
else:
model_.save_pretrained(os.path.join(output_dir, "text_encoder"))
else:
raise ValueError(f"unexpected save model: {model_.__class__}")
                # make sure to pop the weight so that the corresponding model is not saved again
weights.pop()
if transformer_lora_layers_to_save is not None or text_encoder_lora_layers_to_save is not None:
LoraLoaderMixin.save_lora_weights(
output_dir,
transformer_lora_layers=transformer_lora_layers_to_save,
text_encoder_lora_layers=text_encoder_lora_layers_to_save,
)
if args.use_ema:
ema.save_pretrained(os.path.join(output_dir, "ema_model"))
def load_model_hook(models, input_dir):
transformer = None
text_encoder_ = None
while len(models) > 0:
model_ = models.pop()
if isinstance(model_, type(accelerator.unwrap_model(model))):
if args.use_lora:
transformer = model_
else:
load_model = UVit2DModel.from_pretrained(os.path.join(input_dir, "transformer"))
model_.load_state_dict(load_model.state_dict())
del load_model
            elif isinstance(model_, type(accelerator.unwrap_model(text_encoder))):
if args.text_encoder_use_lora:
text_encoder_ = model_
else:
load_model = CLIPTextModelWithProjection.from_pretrained(os.path.join(input_dir, "text_encoder"))
model_.load_state_dict(load_model.state_dict())
del load_model
else:
raise ValueError(f"unexpected save model: {model.__class__}")
if transformer is not None or text_encoder_ is not None:
lora_state_dict, network_alphas = LoraLoaderMixin.lora_state_dict(input_dir)
LoraLoaderMixin.load_lora_into_text_encoder(
lora_state_dict, network_alphas=network_alphas, text_encoder=text_encoder_
)
LoraLoaderMixin.load_lora_into_transformer(
lora_state_dict, network_alphas=network_alphas, transformer=transformer
)
if args.use_ema:
load_from = EMAModel.from_pretrained(os.path.join(input_dir, "ema_model"), model_cls=UVit2DModel)
ema.load_state_dict(load_from.state_dict())
del load_from
accelerator.register_load_state_pre_hook(load_model_hook)
accelerator.register_save_state_pre_hook(save_model_hook)
if args.scale_lr:
args.learning_rate = (
args.learning_rate * args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
)
if args.use_8bit_adam:
try:
import bitsandbytes as bnb
except ImportError:
raise ImportError(
"Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`"
)
optimizer_cls = bnb.optim.AdamW8bit
else:
optimizer_cls = torch.optim.AdamW
# no decay on bias and layernorm and embedding
no_decay = ["bias", "layer_norm.weight", "mlm_ln.weight", "embeddings.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.adam_weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
if args.train_text_encoder:
optimizer_grouped_parameters.append(
{"params": text_encoder.parameters(), "weight_decay": args.adam_weight_decay}
)
optimizer = optimizer_cls(
optimizer_grouped_parameters,
lr=args.learning_rate,
betas=(args.adam_beta1, args.adam_beta2),
weight_decay=args.adam_weight_decay,
eps=args.adam_epsilon,
)
logger.info("Creating dataloaders and lr_scheduler")
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
if args.instance_data_dir is not None:
dataset = InstanceDataRootDataset(
instance_data_root=args.instance_data_dir,
tokenizer=tokenizer,
size=args.resolution,
)
elif args.instance_data_image is not None:
dataset = InstanceDataImageDataset(
instance_data_image=args.instance_data_image,
train_batch_size=args.train_batch_size,
size=args.resolution,
)
elif args.instance_data_dataset is not None:
dataset = HuggingFaceDataset(
hf_dataset=load_dataset(args.instance_data_dataset, split="train"),
tokenizer=tokenizer,
image_key=args.image_key,
prompt_key=args.prompt_key,
prompt_prefix=args.prompt_prefix,
size=args.resolution,
)
else:
assert False
train_dataloader = DataLoader(
dataset,
batch_size=args.train_batch_size,
shuffle=True,
num_workers=args.dataloader_num_workers,
collate_fn=default_collate,
)
train_dataloader.num_batches = len(train_dataloader)
lr_scheduler = diffusers.optimization.get_scheduler(
args.lr_scheduler,
optimizer=optimizer,
num_training_steps=args.max_train_steps * accelerator.num_processes,
num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
)
logger.info("Preparing model, optimizer and dataloaders")
if args.train_text_encoder:
model, optimizer, lr_scheduler, train_dataloader, text_encoder = accelerator.prepare(
model, optimizer, lr_scheduler, train_dataloader, text_encoder
)
else:
model, optimizer, lr_scheduler, train_dataloader = accelerator.prepare(
model, optimizer, lr_scheduler, train_dataloader
)
train_dataloader.num_batches = len(train_dataloader)
weight_dtype = torch.float32
if accelerator.mixed_precision == "fp16":
weight_dtype = torch.float16
elif accelerator.mixed_precision == "bf16":
weight_dtype = torch.bfloat16
if not args.train_text_encoder:
text_encoder.to(device=accelerator.device, dtype=weight_dtype)
vq_model.to(device=accelerator.device)
if args.use_ema:
ema.to(accelerator.device)
with nullcontext() if args.train_text_encoder else torch.no_grad():
empty_embeds, empty_clip_embeds = encode_prompt(
text_encoder, tokenize_prompt(tokenizer, "").to(text_encoder.device, non_blocking=True)
)
    # There is a single image, so we can pre-encode its prompt once
if args.instance_data_image is not None:
prompt = os.path.splitext(os.path.basename(args.instance_data_image))[0]
encoder_hidden_states, cond_embeds = encode_prompt(
text_encoder, tokenize_prompt(tokenizer, prompt).to(text_encoder.device, non_blocking=True)
)
encoder_hidden_states = encoder_hidden_states.repeat(args.train_batch_size, 1, 1)
cond_embeds = cond_embeds.repeat(args.train_batch_size, 1)
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
num_update_steps_per_epoch = math.ceil(train_dataloader.num_batches / args.gradient_accumulation_steps)
# Afterwards we recalculate our number of training epochs.
# Note: We are not doing epoch based training here, but just using this for book keeping and being able to
# reuse the same training loop with other datasets/loaders.
num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
# Train!
logger.info("***** Running training *****")
logger.info(f" Num training steps = {args.max_train_steps}")
logger.info(f" Instantaneous batch size per device = { args.train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
resume_from_checkpoint = args.resume_from_checkpoint
if resume_from_checkpoint:
if resume_from_checkpoint == "latest":
# Get the most recent checkpoint
dirs = os.listdir(args.output_dir)
dirs = [d for d in dirs if d.startswith("checkpoint")]
dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
if len(dirs) > 0:
resume_from_checkpoint = os.path.join(args.output_dir, dirs[-1])
else:
resume_from_checkpoint = None
if resume_from_checkpoint is None:
accelerator.print(
f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
)
else:
accelerator.print(f"Resuming from checkpoint {resume_from_checkpoint}")
if resume_from_checkpoint is None:
global_step = 0
first_epoch = 0
else:
accelerator.load_state(resume_from_checkpoint)
global_step = int(os.path.basename(resume_from_checkpoint).split("-")[1])
first_epoch = global_step // num_update_steps_per_epoch
# As stated above, we are not doing epoch based training here, but just using this for book keeping and being able to
# reuse the same training loop with other datasets/loaders.
for epoch in range(first_epoch, num_train_epochs):
for batch in train_dataloader:
with torch.no_grad():
micro_conds = batch["micro_conds"].to(accelerator.device, non_blocking=True)
pixel_values = batch["image"].to(accelerator.device, non_blocking=True)
batch_size = pixel_values.shape[0]
split_batch_size = args.split_vae_encode if args.split_vae_encode is not None else batch_size
num_splits = math.ceil(batch_size / split_batch_size)
image_tokens = []
for i in range(num_splits):
start_idx = i * split_batch_size
end_idx = min((i + 1) * split_batch_size, batch_size)
                    bs = end_idx - start_idx  # number of samples in this VAE-encode split
image_tokens.append(
vq_model.quantize(vq_model.encode(pixel_values[start_idx:end_idx]).latents)[2][2].reshape(
bs, -1
)
)
image_tokens = torch.cat(image_tokens, dim=0)
batch_size, seq_len = image_tokens.shape
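                    # Masking schedule: sample t ~ U(0, 1) per example and mask a cos(t * pi / 2)
                    # fraction of the image tokens (clipped to at least `min_masking_rate`). Masked
                    # positions are replaced with the mask token id; only they contribute to the loss.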
timesteps = torch.rand(batch_size, device=image_tokens.device)
mask_prob = torch.cos(timesteps * math.pi * 0.5)
mask_prob = mask_prob.clip(args.min_masking_rate)
num_token_masked = (seq_len * mask_prob).round().clamp(min=1)
batch_randperm = torch.rand(batch_size, seq_len, device=image_tokens.device).argsort(dim=-1)
mask = batch_randperm < num_token_masked.unsqueeze(-1)
mask_id = accelerator.unwrap_model(model).config.vocab_size - 1
input_ids = torch.where(mask, mask_id, image_tokens)
labels = torch.where(mask, image_tokens, -100)
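                    # Conditioning dropout for classifier-free guidance: a per-sample Bernoulli mask
                    # decides whether a sample keeps its text conditioning or is swapped to the
                    # empty-prompt embeddings computed before the training loop.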
if args.cond_dropout_prob > 0.0:
assert encoder_hidden_states is not None
batch_size = encoder_hidden_states.shape[0]
mask = (
torch.zeros((batch_size, 1, 1), device=encoder_hidden_states.device).float().uniform_(0, 1)
< args.cond_dropout_prob
)
empty_embeds_ = empty_embeds.expand(batch_size, -1, -1)
encoder_hidden_states = torch.where(
(encoder_hidden_states * mask).bool(), encoder_hidden_states, empty_embeds_
)
empty_clip_embeds_ = empty_clip_embeds.expand(batch_size, -1)
cond_embeds = torch.where((cond_embeds * mask.squeeze(-1)).bool(), cond_embeds, empty_clip_embeds_)
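                    # The flat token sequence is reshaped into a square grid whose side equals the
                    # pixel resolution divided by the VQ-VAE downsampling factor, matching the
                    # transformer's expected 2D token layout.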
bs = input_ids.shape[0]
vae_scale_factor = 2 ** (len(vq_model.config.block_out_channels) - 1)
resolution = args.resolution // vae_scale_factor
input_ids = input_ids.reshape(bs, resolution, resolution)
if "prompt_input_ids" in batch:
with nullcontext() if args.train_text_encoder else torch.no_grad():
encoder_hidden_states, cond_embeds = encode_prompt(
text_encoder, batch["prompt_input_ids"].to(accelerator.device, non_blocking=True)
)
# Train Step
with accelerator.accumulate(model):
codebook_size = accelerator.unwrap_model(model).config.codebook_size
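                # The transformer predicts one codebook distribution per token; flattening the logits
                # to (batch * seq_len, codebook_size) lets a single cross-entropy score the masked
                # positions while unmasked positions are skipped via the -100 ignore label.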
logits = (
model(
input_ids=input_ids,
encoder_hidden_states=encoder_hidden_states,
micro_conds=micro_conds,
pooled_text_emb=cond_embeds,
)
.reshape(bs, codebook_size, -1)
.permute(0, 2, 1)
.reshape(-1, codebook_size)
)
loss = F.cross_entropy(
logits,
labels.view(-1),
ignore_index=-100,
reduction="mean",
)
# Gather the losses across all processes for logging (if we use distributed training).
avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean()
avg_masking_rate = accelerator.gather(mask_prob.repeat(args.train_batch_size)).mean()
accelerator.backward(loss)
if args.max_grad_norm is not None and accelerator.sync_gradients:
accelerator.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad(set_to_none=True)
# Checks if the accelerator has performed an optimization step behind the scenes
if accelerator.sync_gradients:
if args.use_ema:
ema.step(model.parameters())
if (global_step + 1) % args.logging_steps == 0:
logs = {
"step_loss": avg_loss.item(),
"lr": lr_scheduler.get_last_lr()[0],
"avg_masking_rate": avg_masking_rate.item(),
}
accelerator.log(logs, step=global_step + 1)
logger.info(
f"Step: {global_step + 1} "
f"Loss: {avg_loss.item():0.4f} "
f"LR: {lr_scheduler.get_last_lr()[0]:0.6f}"
)
if (global_step + 1) % args.checkpointing_steps == 0:
save_checkpoint(args, accelerator, global_step + 1)
if (global_step + 1) % args.validation_steps == 0 and accelerator.is_main_process:
if args.use_ema:
ema.store(model.parameters())
ema.copy_to(model.parameters())
with torch.no_grad():
logger.info("Generating images...")
model.eval()
if args.train_text_encoder:
text_encoder.eval()
scheduler = AmusedScheduler.from_pretrained(
args.pretrained_model_name_or_path,
subfolder="scheduler",
revision=args.revision,
variant=args.variant,
)
pipe = AmusedPipeline(
transformer=accelerator.unwrap_model(model),
tokenizer=tokenizer,
text_encoder=text_encoder,
vqvae=vq_model,
scheduler=scheduler,
)
pil_images = pipe(prompt=args.validation_prompts).images
wandb_images = [
wandb.Image(image, caption=args.validation_prompts[i])
for i, image in enumerate(pil_images)
]
wandb.log({"generated_images": wandb_images}, step=global_step + 1)
model.train()
if args.train_text_encoder:
text_encoder.train()
if args.use_ema:
ema.restore(model.parameters())
global_step += 1
# Stop training if max steps is reached
if global_step >= args.max_train_steps:
break
# End for
accelerator.wait_for_everyone()
# Evaluate and save checkpoint at the end of training
save_checkpoint(args, accelerator, global_step)
# Save the final trained checkpoint
if accelerator.is_main_process:
model = accelerator.unwrap_model(model)
if args.use_ema:
ema.copy_to(model.parameters())
model.save_pretrained(args.output_dir)
accelerator.end_training()
def save_checkpoint(args, accelerator, global_step):
output_dir = args.output_dir
# _before_ saving state, check if this save would set us over the `checkpoints_total_limit`
if accelerator.is_main_process and args.checkpoints_total_limit is not None:
checkpoints = os.listdir(output_dir)
checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))
# before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints
if len(checkpoints) >= args.checkpoints_total_limit:
num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
removing_checkpoints = checkpoints[0:num_to_remove]
logger.info(
f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints"
)
logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")
for removing_checkpoint in removing_checkpoints:
removing_checkpoint = os.path.join(output_dir, removing_checkpoint)
shutil.rmtree(removing_checkpoint)
save_path = Path(output_dir) / f"checkpoint-{global_step}"
accelerator.save_state(save_path)
logger.info(f"Saved state to {save_path}")
if __name__ == "__main__":
main(parse_args())
| diffusers/examples/amused/train_amused.py/0 | {
"file_path": "diffusers/examples/amused/train_amused.py",
"repo_id": "diffusers",
"token_count": 17459
} | 108 |
import inspect
import time
from pathlib import Path
from typing import Callable, List, Optional, Union
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
"""helper function to spherically interpolate two arrays v1 v2"""
if not isinstance(v0, np.ndarray):
inputs_are_torch = True
input_device = v0.device
v0 = v0.cpu().numpy()
v1 = v1.cpu().numpy()
dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
if np.abs(dot) > DOT_THRESHOLD:
v2 = (1 - t) * v0 + t * v1
else:
theta_0 = np.arccos(dot)
sin_theta_0 = np.sin(theta_0)
theta_t = theta_0 * t
sin_theta_t = np.sin(theta_t)
s0 = np.sin(theta_0 - theta_t) / sin_theta_0
s1 = sin_theta_t / sin_theta_0
v2 = s0 * v0 + s1 * v1
if inputs_are_torch:
v2 = torch.from_numpy(v2).to(input_device)
return v2
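# Minimal usage sketch for `slerp` (illustrative only, never called): interpolating halfway
# between two initial-noise tensors of identical shape.
def _example_slerp():
    noise_a = torch.randn(1, 4, 64, 64)
    noise_b = torch.randn(1, 4, 64, 64)
    # Returns a tensor lying "between" the two noises on the unit hypersphere; nearly colinear
    # inputs fall back to plain linear interpolation.
    return slerp(0.5, noise_a, noise_b)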
class StableDiffusionWalkPipeline(DiffusionPipeline, StableDiffusionMixin):
r"""
Pipeline for text-to-image generation using Stable Diffusion.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
text_encoder ([`CLIPTextModel`]):
Frozen text-encoder. Stable Diffusion uses the text portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
tokenizer (`CLIPTokenizer`):
Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
safety_checker ([`StableDiffusionSafetyChecker`]):
Classification module that estimates whether generated images could be considered offensive or harmful.
Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
feature_extractor ([`CLIPImageProcessor`]):
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
"""
def __init__(
self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
tokenizer: CLIPTokenizer,
unet: UNet2DConditionModel,
scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
safety_checker: StableDiffusionSafetyChecker,
feature_extractor: CLIPImageProcessor,
):
super().__init__()
if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
deprecation_message = (
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
new_config = dict(scheduler.config)
new_config["steps_offset"] = 1
scheduler._internal_dict = FrozenDict(new_config)
if safety_checker is None:
logger.warning(
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
)
self.register_modules(
vae=vae,
text_encoder=text_encoder,
tokenizer=tokenizer,
unet=unet,
scheduler=scheduler,
safety_checker=safety_checker,
feature_extractor=feature_extractor,
)
@torch.no_grad()
def __call__(
self,
prompt: Optional[Union[str, List[str]]] = None,
height: int = 512,
width: int = 512,
num_inference_steps: int = 50,
guidance_scale: float = 7.5,
negative_prompt: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[torch.Generator] = None,
latents: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: int = 1,
text_embeddings: Optional[torch.FloatTensor] = None,
**kwargs,
):
r"""
Function invoked when calling the pipeline for generation.
Args:
prompt (`str` or `List[str]`, *optional*, defaults to `None`):
The prompt or prompts to guide the image generation. If not provided, `text_embeddings` is required.
height (`int`, *optional*, defaults to 512):
The height in pixels of the generated image.
width (`int`, *optional*, defaults to 512):
The width in pixels of the generated image.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
guidance_scale (`float`, *optional*, defaults to 7.5):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. Higher guidance scale encourages the model to generate images that are closely linked to the text
                `prompt`,
usually at the expense of lower image quality.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
if `guidance_scale` is less than `1`).
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
[`schedulers.DDIMScheduler`], will be ignored for others.
generator (`torch.Generator`, *optional*):
A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
deterministic.
latents (`torch.FloatTensor`, *optional*):
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
plain tuple.
callback (`Callable`, *optional*):
A function that will be called every `callback_steps` steps during inference. The function will be
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function will be called. If not specified, the callback will be
called at every step.
text_embeddings (`torch.FloatTensor`, *optional*, defaults to `None`):
Pre-generated text embeddings to be used as inputs for image generation. Can be used in place of
`prompt` to avoid re-computing the embeddings. If not provided, the embeddings will be generated from
the supplied `prompt`.
Returns:
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
When returning a tuple, the first element is a list with the generated images, and the second element is a
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
(nsfw) content, according to the `safety_checker`.
"""
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
if text_embeddings is None:
if isinstance(prompt, str):
batch_size = 1
elif isinstance(prompt, list):
batch_size = len(prompt)
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
# get prompt text embeddings
text_inputs = self.tokenizer(
prompt,
padding="max_length",
max_length=self.tokenizer.model_max_length,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
print(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
)
text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
else:
batch_size = text_embeddings.shape[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
bs_embed, seq_len, _ = text_embeddings.shape
text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
uncond_tokens: List[str]
if negative_prompt is None:
uncond_tokens = [""] * batch_size
elif type(prompt) is not type(negative_prompt):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
f" {type(prompt)}."
)
elif isinstance(negative_prompt, str):
uncond_tokens = [negative_prompt]
elif batch_size != len(negative_prompt):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`."
)
else:
uncond_tokens = negative_prompt
max_length = self.tokenizer.model_max_length
uncond_input = self.tokenizer(
uncond_tokens,
padding="max_length",
max_length=max_length,
truncation=True,
return_tensors="pt",
)
uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
seq_len = uncond_embeddings.shape[1]
uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
latents_dtype = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
self.device
)
else:
latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
latents = latents.to(self.device)
# set timesteps
self.scheduler.set_timesteps(num_inference_steps)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
timesteps_tensor = self.scheduler.timesteps.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
for i, t in enumerate(self.progress_bar(timesteps_tensor)):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
# predict the noise residual
noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
step_idx = i // getattr(self.scheduler, "order", 1)
callback(step_idx, t, latents)
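        # Un-scale the latents by the SD v1 VAE scaling factor (0.18215) before decoding.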
latents = 1 / 0.18215 * latents
image = self.vae.decode(latents).sample
image = (image / 2 + 0.5).clamp(0, 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
if self.safety_checker is not None:
safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
self.device
)
image, has_nsfw_concept = self.safety_checker(
images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
)
else:
has_nsfw_concept = None
if output_type == "pil":
image = self.numpy_to_pil(image)
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
def embed_text(self, text):
"""takes in text and turns it into text embeddings"""
text_input = self.tokenizer(
text,
padding="max_length",
max_length=self.tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
with torch.no_grad():
embed = self.text_encoder(text_input.input_ids.to(self.device))[0]
return embed
def get_noise(self, seed, dtype=torch.float32, height=512, width=512):
"""Takes in random seed and returns corresponding noise vector"""
return torch.randn(
(1, self.unet.config.in_channels, height // 8, width // 8),
generator=torch.Generator(device=self.device).manual_seed(seed),
device=self.device,
dtype=dtype,
)
def walk(
self,
prompts: List[str],
seeds: List[int],
num_interpolation_steps: Optional[int] = 6,
output_dir: Optional[str] = "./dreams",
name: Optional[str] = None,
batch_size: Optional[int] = 1,
height: Optional[int] = 512,
width: Optional[int] = 512,
guidance_scale: Optional[float] = 7.5,
num_inference_steps: Optional[int] = 50,
eta: Optional[float] = 0.0,
) -> List[str]:
"""
Walks through a series of prompts and seeds, interpolating between them and saving the results to disk.
Args:
prompts (`List[str]`):
List of prompts to generate images for.
seeds (`List[int]`):
List of seeds corresponding to provided prompts. Must be the same length as prompts.
num_interpolation_steps (`int`, *optional*, defaults to 6):
Number of interpolation steps to take between prompts.
output_dir (`str`, *optional*, defaults to `./dreams`):
Directory to save the generated images to.
name (`str`, *optional*, defaults to `None`):
Subdirectory of `output_dir` to save the generated images to. If `None`, the name will
be the current time.
batch_size (`int`, *optional*, defaults to 1):
Number of images to generate at once.
height (`int`, *optional*, defaults to 512):
Height of the generated images.
width (`int`, *optional*, defaults to 512):
Width of the generated images.
guidance_scale (`float`, *optional*, defaults to 7.5):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. Higher guidance scale encourages the model to generate images that are closely linked to the text
                `prompt`,
usually at the expense of lower image quality.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
[`schedulers.DDIMScheduler`], will be ignored for others.
Returns:
`List[str]`: List of paths to the generated images.
"""
if not len(prompts) == len(seeds):
raise ValueError(
f"Number of prompts and seeds must be equalGot {len(prompts)} prompts and {len(seeds)} seeds"
)
name = name or time.strftime("%Y%m%d-%H%M%S")
save_path = Path(output_dir) / name
save_path.mkdir(exist_ok=True, parents=True)
frame_idx = 0
frame_filepaths = []
for prompt_a, prompt_b, seed_a, seed_b in zip(prompts, prompts[1:], seeds, seeds[1:]):
# Embed Text
embed_a = self.embed_text(prompt_a)
embed_b = self.embed_text(prompt_b)
# Get Noise
noise_dtype = embed_a.dtype
noise_a = self.get_noise(seed_a, noise_dtype, height, width)
noise_b = self.get_noise(seed_b, noise_dtype, height, width)
noise_batch, embeds_batch = None, None
T = np.linspace(0.0, 1.0, num_interpolation_steps)
for i, t in enumerate(T):
noise = slerp(float(t), noise_a, noise_b)
embed = torch.lerp(embed_a, embed_b, t)
noise_batch = noise if noise_batch is None else torch.cat([noise_batch, noise], dim=0)
embeds_batch = embed if embeds_batch is None else torch.cat([embeds_batch, embed], dim=0)
batch_is_ready = embeds_batch.shape[0] == batch_size or i + 1 == T.shape[0]
if batch_is_ready:
outputs = self(
latents=noise_batch,
text_embeddings=embeds_batch,
height=height,
width=width,
guidance_scale=guidance_scale,
eta=eta,
num_inference_steps=num_inference_steps,
)
noise_batch, embeds_batch = None, None
for image in outputs["images"]:
frame_filepath = str(save_path / f"frame_{frame_idx:06d}.png")
image.save(frame_filepath)
frame_filepaths.append(frame_filepath)
frame_idx += 1
return frame_filepaths
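# Illustrative usage sketch (never called here; assumes a CUDA device). The checkpoint id is one
# already mentioned in the docstring above, and loading via `custom_pipeline` is an assumption
# about how this community file is typically consumed.
def _example_walk():
    pipe = DiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4",
        custom_pipeline="interpolate_stable_diffusion",
        torch_dtype=torch.float16,
    ).to("cuda")
    # Interpolates between consecutive (prompt, seed) pairs and writes PNG frames to ./dreams.
    return pipe.walk(
        prompts=["a photograph of a forest in autumn", "a photograph of a snowy mountain"],
        seeds=[42, 1337],
        num_interpolation_steps=8,
    )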
| diffusers/examples/community/interpolate_stable_diffusion.py/0 | {
"file_path": "diffusers/examples/community/interpolate_stable_diffusion.py",
"repo_id": "diffusers",
"token_count": 11221
} | 109 |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
from diffusers.loaders import IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
from diffusers.models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel, UNetMotionModel
from diffusers.models.lora import adjust_lora_scale_text_encoder
from diffusers.models.unets.unet_motion_model import MotionAdapter
from diffusers.pipelines.animatediff.pipeline_output import AnimateDiffPipelineOutput
from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
from diffusers.schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from diffusers.utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers
from diffusers.utils.torch_utils import is_compiled_module, randn_tensor
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> from diffusers import AutoencoderKL, ControlNetModel, MotionAdapter
>>> from diffusers.pipelines import DiffusionPipeline
>>> from diffusers.schedulers import DPMSolverMultistepScheduler
>>> from PIL import Image
>>> motion_id = "guoyww/animatediff-motion-adapter-v1-5-2"
>>> adapter = MotionAdapter.from_pretrained(motion_id)
>>> controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_openpose", torch_dtype=torch.float16)
>>> vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16)
>>> model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
>>> pipe = DiffusionPipeline.from_pretrained(
... model_id,
... motion_adapter=adapter,
... controlnet=controlnet,
... vae=vae,
... custom_pipeline="pipeline_animatediff_controlnet",
... ).to(device="cuda", dtype=torch.float16)
>>> pipe.scheduler = DPMSolverMultistepScheduler.from_pretrained(
... model_id, subfolder="scheduler", clip_sample=False, timestep_spacing="linspace", steps_offset=1, beta_schedule="linear",
... )
>>> pipe.enable_vae_slicing()
>>> conditioning_frames = []
>>> for i in range(1, 16 + 1):
... conditioning_frames.append(Image.open(f"frame_{i}.png"))
>>> prompt = "astronaut in space, dancing"
>>> negative_prompt = "bad quality, worst quality, jpeg artifacts, ugly"
>>> result = pipe(
... prompt=prompt,
... negative_prompt=negative_prompt,
... width=512,
... height=768,
... conditioning_frames=conditioning_frames,
... num_inference_steps=12,
... )
>>> from diffusers.utils import export_to_gif
>>> export_to_gif(result.frames[0], "result.gif")
```
"""
# Copied from diffusers.pipelines.animatediff.pipeline_animatediff.tensor2vid
def tensor2vid(video: torch.Tensor, processor, output_type="np"):
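    # `video` is laid out as (batch, channels, frames, height, width); each batch element's frames
    # are post-processed as a batch of images by the pipeline's VaeImageProcessor.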
batch_size, channels, num_frames, height, width = video.shape
outputs = []
for batch_idx in range(batch_size):
batch_vid = video[batch_idx].permute(1, 0, 2, 3)
batch_output = processor.postprocess(batch_vid, output_type)
outputs.append(batch_output)
if output_type == "np":
outputs = np.stack(outputs)
elif output_type == "pt":
outputs = torch.stack(outputs)
elif not output_type == "pil":
raise ValueError(f"{output_type} does not exist. Please choose one of ['np', 'pt', 'pil']")
return outputs
class AnimateDiffControlNetPipeline(
DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, IPAdapterMixin, LoraLoaderMixin
):
r"""
Pipeline for text-to-video generation.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
The pipeline also inherits the following loading methods:
- [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
- [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
- [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
- [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
text_encoder ([`CLIPTextModel`]):
Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
tokenizer (`CLIPTokenizer`):
A [`~transformers.CLIPTokenizer`] to tokenize text.
unet ([`UNet2DConditionModel`]):
A [`UNet2DConditionModel`] used to create a UNetMotionModel to denoise the encoded video latents.
motion_adapter ([`MotionAdapter`]):
A [`MotionAdapter`] to be used in combination with `unet` to denoise the encoded video latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
"""
model_cpu_offload_seq = "text_encoder->unet->vae"
_optional_components = ["feature_extractor", "image_encoder"]
_callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
def __init__(
self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
tokenizer: CLIPTokenizer,
unet: UNet2DConditionModel,
motion_adapter: MotionAdapter,
controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel],
scheduler: Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
],
feature_extractor: Optional[CLIPImageProcessor] = None,
image_encoder: Optional[CLIPVisionModelWithProjection] = None,
):
super().__init__()
unet = UNetMotionModel.from_unet2d(unet, motion_adapter)
if isinstance(controlnet, (list, tuple)):
controlnet = MultiControlNetModel(controlnet)
self.register_modules(
vae=vae,
text_encoder=text_encoder,
tokenizer=tokenizer,
unet=unet,
motion_adapter=motion_adapter,
controlnet=controlnet,
scheduler=scheduler,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
)
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
self.control_image_processor = VaeImageProcessor(
vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
)
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt with num_images_per_prompt -> num_videos_per_prompt
def encode_prompt(
self,
prompt,
device,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt=None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
lora_scale: Optional[float] = None,
clip_skip: Optional[int] = None,
):
r"""
Encodes the prompt into text encoder hidden states.
Args:
prompt (`str` or `List[str]`, *optional*):
prompt to be encoded
device: (`torch.device`):
torch device
num_images_per_prompt (`int`):
number of images that should be generated per prompt
do_classifier_free_guidance (`bool`):
whether to use classifier free guidance or not
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
lora_scale (`float`, *optional*):
A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
clip_skip (`int`, *optional*):
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
the output of the pre-final layer will be used for computing the prompt embeddings.
"""
# set lora scale so that monkey patched LoRA
# function of text encoder can correctly access it
if lora_scale is not None and isinstance(self, LoraLoaderMixin):
self._lora_scale = lora_scale
# dynamically adjust the LoRA scale
if not USE_PEFT_BACKEND:
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
else:
scale_lora_layers(self.text_encoder, lora_scale)
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
if prompt_embeds is None:
# textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin):
prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
text_inputs = self.tokenizer(
prompt,
padding="max_length",
max_length=self.tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
text_input_ids, untruncated_ids
):
removed_text = self.tokenizer.batch_decode(
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
)
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
)
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
attention_mask = text_inputs.attention_mask.to(device)
else:
attention_mask = None
if clip_skip is None:
prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
prompt_embeds = prompt_embeds[0]
else:
prompt_embeds = self.text_encoder(
text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
)
# Access the `hidden_states` first, that contains a tuple of
# all the hidden states from the encoder layers. Then index into
# the tuple to access the hidden states from the desired layer.
prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
# We also need to apply the final LayerNorm here to not mess with the
# representations. The `last_hidden_states` that we typically use for
# obtaining the final prompt representations passes through the LayerNorm
# layer.
prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
if self.text_encoder is not None:
prompt_embeds_dtype = self.text_encoder.dtype
elif self.unet is not None:
prompt_embeds_dtype = self.unet.dtype
else:
prompt_embeds_dtype = prompt_embeds.dtype
prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
bs_embed, seq_len, _ = prompt_embeds.shape
# duplicate text embeddings for each generation per prompt, using mps friendly method
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance and negative_prompt_embeds is None:
uncond_tokens: List[str]
if negative_prompt is None:
uncond_tokens = [""] * batch_size
elif prompt is not None and type(prompt) is not type(negative_prompt):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
f" {type(prompt)}."
)
elif isinstance(negative_prompt, str):
uncond_tokens = [negative_prompt]
elif batch_size != len(negative_prompt):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`."
)
else:
uncond_tokens = negative_prompt
# textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin):
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
max_length = prompt_embeds.shape[1]
uncond_input = self.tokenizer(
uncond_tokens,
padding="max_length",
max_length=max_length,
truncation=True,
return_tensors="pt",
)
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
attention_mask = uncond_input.attention_mask.to(device)
else:
attention_mask = None
negative_prompt_embeds = self.text_encoder(
uncond_input.input_ids.to(device),
attention_mask=attention_mask,
)
negative_prompt_embeds = negative_prompt_embeds[0]
if do_classifier_free_guidance:
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
seq_len = negative_prompt_embeds.shape[1]
negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder, lora_scale)
return prompt_embeds, negative_prompt_embeds
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
    def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
        dtype = next(self.image_encoder.parameters()).dtype
        if not isinstance(image, torch.Tensor):
            image = self.feature_extractor(image, return_tensors="pt").pixel_values
        image = image.to(device=device, dtype=dtype)
        if output_hidden_states:
            # IP-Adapter-plus style projection layers consume the penultimate hidden states
            # rather than the pooled image embedding.
            image_embeds = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
            uncond_image_embeds = self.image_encoder(torch.zeros_like(image), output_hidden_states=True).hidden_states[-2]
        else:
            image_embeds = self.image_encoder(image).image_embeds
            uncond_image_embeds = torch.zeros_like(image_embeds)
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        uncond_image_embeds = uncond_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        return image_embeds, uncond_image_embeds
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds
def prepare_ip_adapter_image_embeds(
self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt
):
if ip_adapter_image_embeds is None:
if not isinstance(ip_adapter_image, list):
ip_adapter_image = [ip_adapter_image]
if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers):
raise ValueError(
f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
)
image_embeds = []
for single_ip_adapter_image, image_proj_layer in zip(
ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
):
output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
single_image_embeds, single_negative_image_embeds = self.encode_image(
single_ip_adapter_image, device, 1, output_hidden_state
)
single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0)
single_negative_image_embeds = torch.stack(
[single_negative_image_embeds] * num_images_per_prompt, dim=0
)
if self.do_classifier_free_guidance:
single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
single_image_embeds = single_image_embeds.to(device)
image_embeds.append(single_image_embeds)
else:
image_embeds = ip_adapter_image_embeds
return image_embeds
# Copied from diffusers.pipelines.text_to_video_synthesis/pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents
def decode_latents(self, latents):
latents = 1 / self.vae.config.scaling_factor * latents
batch_size, channels, num_frames, height, width = latents.shape
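        # Fold the frame axis into the batch axis so the 2D VAE can decode every frame
        # independently; the decoded images are reshaped back to (batch, channels, frames, h, w) below.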
latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width)
image = self.vae.decode(latents).sample
video = (
image[None, :]
.reshape(
(
batch_size,
num_frames,
-1,
)
+ image.shape[2:]
)
.permute(0, 2, 1, 3, 4)
)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
video = video.float()
return video
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
def prepare_extra_step_kwargs(self, generator, eta):
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
# check if the scheduler accepts generator
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
extra_step_kwargs["generator"] = generator
return extra_step_kwargs
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs
def check_inputs(
self,
prompt,
height,
width,
num_frames,
callback_steps,
negative_prompt=None,
prompt_embeds=None,
negative_prompt_embeds=None,
callback_on_step_end_tensor_inputs=None,
image=None,
controlnet_conditioning_scale=1.0,
control_guidance_start=0.0,
control_guidance_end=1.0,
):
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
if callback_on_step_end_tensor_inputs is not None and not all(
k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
):
raise ValueError(
f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
)
if prompt is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
" only forward one of the two."
)
elif prompt is None and prompt_embeds is None:
raise ValueError(
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
)
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if negative_prompt is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
if prompt_embeds is not None and negative_prompt_embeds is not None:
if prompt_embeds.shape != negative_prompt_embeds.shape:
raise ValueError(
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
f" {negative_prompt_embeds.shape}."
)
# `prompt` needs more sophisticated handling when there are multiple
# conditionings.
if isinstance(self.controlnet, MultiControlNetModel):
if isinstance(prompt, list):
logger.warning(
f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}"
" prompts. The conditionings will be fixed across the prompts."
)
# Check `image`
is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
self.controlnet, torch._dynamo.eval_frame.OptimizedModule
)
if (
isinstance(self.controlnet, ControlNetModel)
or is_compiled
and isinstance(self.controlnet._orig_mod, ControlNetModel)
):
if not isinstance(image, list):
raise TypeError(f"For single controlnet, `image` must be of type `list` but got {type(image)}")
if len(image) != num_frames:
raise ValueError(f"Excepted image to have length {num_frames} but got {len(image)=}")
elif (
isinstance(self.controlnet, MultiControlNetModel)
or is_compiled
and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
):
if not isinstance(image, list) or not isinstance(image[0], list):
raise TypeError(f"For multiple controlnets: `image` must be type list of lists but got {type(image)=}")
if len(image[0]) != num_frames:
raise ValueError(f"Expected length of image sublist as {num_frames} but got {len(image[0])=}")
if any(len(img) != len(image[0]) for img in image):
raise ValueError("All conditioning frame batches for multicontrolnet must be same size")
else:
assert False
# Check `controlnet_conditioning_scale`
if (
isinstance(self.controlnet, ControlNetModel)
or is_compiled
and isinstance(self.controlnet._orig_mod, ControlNetModel)
):
if not isinstance(controlnet_conditioning_scale, float):
raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
elif (
isinstance(self.controlnet, MultiControlNetModel)
or is_compiled
and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
):
if isinstance(controlnet_conditioning_scale, list):
if any(isinstance(i, list) for i in controlnet_conditioning_scale):
raise ValueError("A single batch of multiple conditionings are supported at the moment.")
elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
self.controlnet.nets
):
raise ValueError(
"For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
" the same length as the number of controlnets"
)
else:
assert False
if not isinstance(control_guidance_start, (tuple, list)):
control_guidance_start = [control_guidance_start]
if not isinstance(control_guidance_end, (tuple, list)):
control_guidance_end = [control_guidance_end]
if len(control_guidance_start) != len(control_guidance_end):
raise ValueError(
f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
)
if isinstance(self.controlnet, MultiControlNetModel):
if len(control_guidance_start) != len(self.controlnet.nets):
raise ValueError(
f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}."
)
for start, end in zip(control_guidance_start, control_guidance_end):
if start >= end:
raise ValueError(
f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
)
if start < 0.0:
raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
if end > 1.0:
raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
# Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image
def check_image(self, image, prompt, prompt_embeds):
image_is_pil = isinstance(image, Image.Image)
image_is_tensor = isinstance(image, torch.Tensor)
image_is_np = isinstance(image, np.ndarray)
image_is_pil_list = isinstance(image, list) and isinstance(image[0], Image.Image)
image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
if (
not image_is_pil
and not image_is_tensor
and not image_is_np
and not image_is_pil_list
and not image_is_tensor_list
and not image_is_np_list
):
raise TypeError(
f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
)
if image_is_pil:
image_batch_size = 1
else:
image_batch_size = len(image)
if prompt is not None and isinstance(prompt, str):
prompt_batch_size = 1
elif prompt is not None and isinstance(prompt, list):
prompt_batch_size = len(prompt)
elif prompt_embeds is not None:
prompt_batch_size = prompt_embeds.shape[0]
if image_batch_size != 1 and image_batch_size != prompt_batch_size:
raise ValueError(
f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
)
# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents
def prepare_latents(
self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None
):
shape = (
batch_size,
num_channels_latents,
num_frames,
height // self.vae_scale_factor,
width // self.vae_scale_factor,
)
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
if latents is None:
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
else:
latents = latents.to(device)
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
return latents
# Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image
def prepare_image(
self,
image,
width,
height,
batch_size,
num_images_per_prompt,
device,
dtype,
do_classifier_free_guidance=False,
guess_mode=False,
):
image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
image_batch_size = image.shape[0]
if image_batch_size == 1:
repeat_by = batch_size
else:
# image batch size is the same as prompt batch size
repeat_by = num_images_per_prompt
image = image.repeat_interleave(repeat_by, dim=0)
image = image.to(device=device, dtype=dtype)
if do_classifier_free_guidance and not guess_mode:
image = torch.cat([image] * 2)
return image
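# Editor's note (illustrative sketch with hypothetical shapes): a single preprocessed conditioning
# image of shape (1, 3, H, W) is repeated along dim 0 to the requested batch size and doubled when
# classifier-free guidance is on (and guess_mode is off):
#
#   import torch
#   image = torch.randn(1, 3, 512, 512)
#   image = image.repeat_interleave(16, dim=0)  # (16, 3, 512, 512)
#   image = torch.cat([image] * 2)              # (32, 3, 512, 512) with CFG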
@property
def guidance_scale(self):
return self._guidance_scale
@property
def clip_skip(self):
return self._clip_skip
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
@property
def do_classifier_free_guidance(self):
return self._guidance_scale > 1
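# Editor's note (sketch of the guidance formula used in the denoising loop below): with
# w = guidance_scale, classifier-free guidance combines the two predictions as
#
#   noise_pred = noise_pred_uncond + w * (noise_pred_text - noise_pred_uncond)
#
# so w = 1 recovers the plain text-conditioned prediction and larger w follows the prompt more closely.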
@property
def cross_attention_kwargs(self):
return self._cross_attention_kwargs
@property
def num_timesteps(self):
return self._num_timesteps
@torch.no_grad()
def __call__(
self,
prompt: Union[str, List[str]] = None,
num_frames: Optional[int] = 16,
height: Optional[int] = None,
width: Optional[int] = None,
num_inference_steps: int = 50,
guidance_scale: float = 7.5,
negative_prompt: Optional[Union[str, List[str]]] = None,
num_videos_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
ip_adapter_image: Optional[PipelineImageInput] = None,
ip_adapter_image_embeds: Optional[PipelineImageInput] = None,
conditioning_frames: Optional[List[PipelineImageInput]] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
guess_mode: bool = False,
control_guidance_start: Union[float, List[float]] = 0.0,
control_guidance_end: Union[float, List[float]] = 1.0,
clip_skip: Optional[int] = None,
callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
callback_on_step_end_tensor_inputs: List[str] = ["latents"],
**kwargs,
):
r"""
The call function to the pipeline for generation.
Args:
prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
The height in pixels of the generated video.
width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
The width in pixels of the generated video.
num_frames (`int`, *optional*, defaults to 16):
The number of video frames that are generated. Defaults to 16 frames, which at 8 frames per second
amounts to 2 seconds of video.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to higher quality videos at the
expense of slower inference.
guidance_scale (`float`, *optional*, defaults to 7.5):
A higher guidance scale value encourages the model to generate images closely linked to the text
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide what not to include in image generation. If not defined, you need to
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
generation deterministic.
latents (`torch.FloatTensor`, *optional*):
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor is generated by sampling using the supplied random `generator`. Latents should be of shape
`(batch_size, num_channel, num_frames, height, width)`.
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
provided, text embeddings are generated from the `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
ip_adapter_image (`PipelineImageInput`, *optional*):
Optional image input to work with IP Adapters.
ip_adapter_image_embeds (`List[torch.FloatTensor]`, *optional*):
Pre-generated image embeddings for IP-Adapter. It should be a list with the same length as the number of IP-Adapters.
Each element should be a tensor of shape `(batch_size, num_images, emb_dim)` and should contain the negative image embedding
if `do_classifier_free_guidance` is set to `True`.
If not provided, embeddings are computed from the `ip_adapter_image` input argument.
conditioning_frames (`List[PipelineImageInput]`, *optional*):
The ControlNet input condition to provide guidance to the `unet` for generation. If multiple ControlNets
are specified, images must be passed as a list such that each element of the list can be correctly
batched for input to a single ControlNet.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated video. Choose between `torch.FloatTensor`, `PIL.Image` or
`np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] instead
of a plain tuple.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
[`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added
to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set
the corresponding scale as a list.
guess_mode (`bool`, *optional*, defaults to `False`):
The ControlNet encoder tries to recognize the content of the input image even if you remove all
prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended.
control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
The percentage of total steps at which the ControlNet starts applying.
control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
The percentage of total steps at which the ControlNet stops applying.
clip_skip (`int`, *optional*):
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
the output of the pre-final layer will be used for computing the prompt embeddings.
callback_on_step_end (`Callable`, *optional*):
A function that is called at the end of each denoising step during inference. The function is called
with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
`callback_on_step_end_tensor_inputs`.
callback_on_step_end_tensor_inputs (`List`, *optional*):
The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
`._callback_tensor_inputs` attribute of your pipeline class.
Examples:
Returns:
[`~pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] or `tuple`:
If `return_dict` is `True`, [`~pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] is
returned, otherwise a `tuple` is returned where the first element is a list with the generated frames.
"""
callback = kwargs.pop("callback", None)
callback_steps = kwargs.pop("callback_steps", None)
if callback is not None:
deprecate(
"callback",
"1.0.0",
"Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
)
if callback_steps is not None:
deprecate(
"callback_steps",
"1.0.0",
"Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
)
controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
# align format for control guidance
if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
control_guidance_start = len(control_guidance_end) * [control_guidance_start]
elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
control_guidance_end = len(control_guidance_start) * [control_guidance_end]
elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
control_guidance_start, control_guidance_end = (
mult * [control_guidance_start],
mult * [control_guidance_end],
)
# 0. Default height and width to unet
height = height or self.unet.config.sample_size * self.vae_scale_factor
width = width or self.unet.config.sample_size * self.vae_scale_factor
num_videos_per_prompt = 1
# 1. Check inputs. Raise error if not correct
self.check_inputs(
prompt=prompt,
height=height,
width=width,
num_frames=num_frames,
callback_steps=callback_steps,
negative_prompt=negative_prompt,
callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
image=conditioning_frames,
controlnet_conditioning_scale=controlnet_conditioning_scale,
control_guidance_start=control_guidance_start,
control_guidance_end=control_guidance_end,
)
self._guidance_scale = guidance_scale
self._clip_skip = clip_skip
self._cross_attention_kwargs = cross_attention_kwargs
# 2. Define call parameters
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
device = self._execution_device
if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
global_pool_conditions = (
controlnet.config.global_pool_conditions
if isinstance(controlnet, ControlNetModel)
else controlnet.nets[0].config.global_pool_conditions
)
guess_mode = guess_mode or global_pool_conditions
# 3. Encode input prompt
text_encoder_lora_scale = (
cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
)
prompt_embeds, negative_prompt_embeds = self.encode_prompt(
prompt,
device,
num_videos_per_prompt,
self.do_classifier_free_guidance,
negative_prompt,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
lora_scale=text_encoder_lora_scale,
clip_skip=self.clip_skip,
)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
if self.do_classifier_free_guidance:
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
if ip_adapter_image is not None:
image_embeds = self.prepare_ip_adapter_image_embeds(
ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_videos_per_prompt
)
if isinstance(controlnet, ControlNetModel):
conditioning_frames = self.prepare_image(
image=conditioning_frames,
width=width,
height=height,
batch_size=batch_size * num_videos_per_prompt * num_frames,
num_images_per_prompt=num_videos_per_prompt,
device=device,
dtype=controlnet.dtype,
do_classifier_free_guidance=self.do_classifier_free_guidance,
guess_mode=guess_mode,
)
elif isinstance(controlnet, MultiControlNetModel):
cond_prepared_frames = []
for frame_ in conditioning_frames:
prepared_frame = self.prepare_image(
image=frame_,
width=width,
height=height,
batch_size=batch_size * num_videos_per_prompt * num_frames,
num_images_per_prompt=num_videos_per_prompt,
device=device,
dtype=controlnet.dtype,
do_classifier_free_guidance=self.do_classifier_free_guidance,
guess_mode=guess_mode,
)
cond_prepared_frames.append(prepared_frame)
conditioning_frames = cond_prepared_frames
else:
assert False
# 4. Prepare timesteps
self.scheduler.set_timesteps(num_inference_steps, device=device)
timesteps = self.scheduler.timesteps
self._num_timesteps = len(timesteps)
# 5. Prepare latent variables
num_channels_latents = self.unet.config.in_channels
latents = self.prepare_latents(
batch_size * num_videos_per_prompt,
num_channels_latents,
num_frames,
height,
width,
prompt_embeds.dtype,
device,
generator,
latents,
)
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 7. Add image embeds for IP-Adapter
added_cond_kwargs = (
{"image_embeds": image_embeds}
if ip_adapter_image is not None or ip_adapter_image_embeds is not None
else None
)
# 7.1 Create tensor stating which controlnets to keep
controlnet_keep = []
for i in range(len(timesteps)):
keeps = [
1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
for s, e in zip(control_guidance_start, control_guidance_end)
]
controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
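# Editor's note (illustration with hypothetical numbers): with 50 timesteps, control_guidance_start=0.0
# and control_guidance_end=0.5, `keeps` is 1.0 for roughly the first half of the steps and 0.0 afterwards,
# so the ControlNet residuals are zeroed once (i + 1) / len(timesteps) exceeds 0.5:
#
#   keeps_per_step = [1.0 - float(i / 50 < 0.0 or (i + 1) / 50 > 0.5) for i in range(50)]
#   assert keeps_per_step[0] == 1.0 and keeps_per_step[-1] == 0.0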
# 8. Denoising loop
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
if guess_mode and self.do_classifier_free_guidance:
# Infer ControlNet only for the conditional batch.
control_model_input = latents
control_model_input = self.scheduler.scale_model_input(control_model_input, t)
controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
else:
control_model_input = latent_model_input
controlnet_prompt_embeds = prompt_embeds
controlnet_prompt_embeds = controlnet_prompt_embeds.repeat_interleave(num_frames, dim=0)
if isinstance(controlnet_keep[i], list):
cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
else:
controlnet_cond_scale = controlnet_conditioning_scale
if isinstance(controlnet_cond_scale, list):
controlnet_cond_scale = controlnet_cond_scale[0]
cond_scale = controlnet_cond_scale * controlnet_keep[i]
control_model_input = torch.transpose(control_model_input, 1, 2)
control_model_input = control_model_input.reshape(
(-1, control_model_input.shape[2], control_model_input.shape[3], control_model_input.shape[4])
)
down_block_res_samples, mid_block_res_sample = self.controlnet(
control_model_input,
t,
encoder_hidden_states=controlnet_prompt_embeds,
controlnet_cond=conditioning_frames,
conditioning_scale=cond_scale,
guess_mode=guess_mode,
return_dict=False,
)
# predict the noise residual
noise_pred = self.unet(
latent_model_input,
t,
encoder_hidden_states=prompt_embeds,
cross_attention_kwargs=self.cross_attention_kwargs,
added_cond_kwargs=added_cond_kwargs,
down_block_additional_residuals=down_block_res_samples,
mid_block_additional_residual=mid_block_res_sample,
).sample
# perform guidance
if self.do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
if callback_on_step_end is not None:
callback_kwargs = {}
for k in callback_on_step_end_tensor_inputs:
callback_kwargs[k] = locals()[k]
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
latents = callback_outputs.pop("latents", latents)
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
# call the callback, if provided
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
progress_bar.update()
if callback is not None and i % callback_steps == 0:
callback(i, t, latents)
# 9. Post processing
if output_type == "latent":
video = latents
else:
video_tensor = self.decode_latents(latents)
video = tensor2vid(video_tensor, self.image_processor, output_type=output_type)
# 10. Offload all models
self.maybe_free_model_hooks()
if not return_dict:
return (video,)
return AnimateDiffPipelineOutput(frames=video)
| diffusers/examples/community/pipeline_animatediff_controlnet.py/0 | {
"file_path": "diffusers/examples/community/pipeline_animatediff_controlnet.py",
"repo_id": "diffusers",
"token_count": 24468
} | 110 |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2024 Custom Diffusion authors and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import itertools
import json
import logging
import math
import os
import random
import shutil
import warnings
from pathlib import Path
import numpy as np
import safetensors
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
from huggingface_hub import HfApi, create_repo
from huggingface_hub.utils import insecure_hashlib
from packaging import version
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import AutoTokenizer, PretrainedConfig
import diffusers
from diffusers import (
AutoencoderKL,
DDPMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
UNet2DConditionModel,
)
from diffusers.loaders import AttnProcsLayers
from diffusers.models.attention_processor import (
CustomDiffusionAttnProcessor,
CustomDiffusionAttnProcessor2_0,
CustomDiffusionXFormersAttnProcessor,
)
from diffusers.optimization import get_scheduler
from diffusers.utils import check_min_version, is_wandb_available
from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card
from diffusers.utils.import_utils import is_xformers_available
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
check_min_version("0.28.0.dev0")
logger = get_logger(__name__)
def freeze_params(params):
for param in params:
param.requires_grad = False
def save_model_card(repo_id: str, images=None, base_model=str, prompt=str, repo_folder=None):
img_str = ""
for i, image in enumerate(images):
image.save(os.path.join(repo_folder, f"image_{i}.png"))
img_str += f"![image_{i}](./image_{i}.png)\n"
model_description = f"""
# Custom Diffusion - {repo_id}
These are Custom Diffusion adaption weights for {base_model}. The weights were trained on {prompt} using [Custom Diffusion](https://www.cs.cmu.edu/~custom-diffusion). You can find some example images in the following. \n
{img_str}
\nFor more details on the training, please follow [this link](https://github.com/huggingface/diffusers/blob/main/examples/custom_diffusion).
"""
model_card = load_or_create_model_card(
repo_id_or_path=repo_id,
from_training=True,
license="creativeml-openrail-m",
base_model=base_model,
prompt=prompt,
model_description=model_description,
inference=True,
)
tags = [
"text-to-image",
"diffusers",
"stable-diffusion",
"stable-diffusion-diffusers",
"custom-diffusion",
"diffusers-training",
]
model_card = populate_model_card(model_card, tags=tags)
model_card.save(os.path.join(repo_folder, "README.md"))
def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str, revision: str):
text_encoder_config = PretrainedConfig.from_pretrained(
pretrained_model_name_or_path,
subfolder="text_encoder",
revision=revision,
)
model_class = text_encoder_config.architectures[0]
if model_class == "CLIPTextModel":
from transformers import CLIPTextModel
return CLIPTextModel
elif model_class == "RobertaSeriesModelWithTransformation":
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import RobertaSeriesModelWithTransformation
return RobertaSeriesModelWithTransformation
else:
raise ValueError(f"{model_class} is not supported.")
def collate_fn(examples, with_prior_preservation):
input_ids = [example["instance_prompt_ids"] for example in examples]
pixel_values = [example["instance_images"] for example in examples]
mask = [example["mask"] for example in examples]
# Concat class and instance examples for prior preservation.
# We do this to avoid doing two forward passes.
if with_prior_preservation:
input_ids += [example["class_prompt_ids"] for example in examples]
pixel_values += [example["class_images"] for example in examples]
mask += [example["class_mask"] for example in examples]
input_ids = torch.cat(input_ids, dim=0)
pixel_values = torch.stack(pixel_values)
mask = torch.stack(mask)
pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
mask = mask.to(memory_format=torch.contiguous_format).float()
batch = {"input_ids": input_ids, "pixel_values": pixel_values, "mask": mask.unsqueeze(1)}
return batch
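# Editor's note (hedged sketch, assuming CLIP's 77-token context, resolution 512 and mask_size 64):
# with prior preservation and two examples per device, the collated batch roughly looks like
#
#   batch["pixel_values"].shape == (4, 3, 512, 512)  # instance + class images stacked
#   batch["input_ids"].shape    == (4, 77)           # instance + class prompt ids
#   batch["mask"].shape         == (4, 1, 64, 64)    # unsqueezed loss masks
#
# i.e. class examples are appended so a single forward pass covers both loss terms.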
class PromptDataset(Dataset):
"A simple dataset to prepare the prompts to generate class images on multiple GPUs."
def __init__(self, prompt, num_samples):
self.prompt = prompt
self.num_samples = num_samples
def __len__(self):
return self.num_samples
def __getitem__(self, index):
example = {}
example["prompt"] = self.prompt
example["index"] = index
return example
class CustomDiffusionDataset(Dataset):
"""
A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
It pre-processes the images and tokenizes the prompts.
"""
def __init__(
self,
concepts_list,
tokenizer,
size=512,
mask_size=64,
center_crop=False,
with_prior_preservation=False,
num_class_images=200,
hflip=False,
aug=True,
):
self.size = size
self.mask_size = mask_size
self.center_crop = center_crop
self.tokenizer = tokenizer
self.interpolation = Image.BILINEAR
self.aug = aug
self.instance_images_path = []
self.class_images_path = []
self.with_prior_preservation = with_prior_preservation
for concept in concepts_list:
inst_img_path = [
(x, concept["instance_prompt"]) for x in Path(concept["instance_data_dir"]).iterdir() if x.is_file()
]
self.instance_images_path.extend(inst_img_path)
if with_prior_preservation:
class_data_root = Path(concept["class_data_dir"])
if os.path.isdir(class_data_root):
class_images_path = list(class_data_root.iterdir())
class_prompt = [concept["class_prompt"] for _ in range(len(class_images_path))]
else:
with open(class_data_root, "r") as f:
class_images_path = f.read().splitlines()
with open(concept["class_prompt"], "r") as f:
class_prompt = f.read().splitlines()
class_img_path = list(zip(class_images_path, class_prompt))
self.class_images_path.extend(class_img_path[:num_class_images])
random.shuffle(self.instance_images_path)
self.num_instance_images = len(self.instance_images_path)
self.num_class_images = len(self.class_images_path)
self._length = max(self.num_class_images, self.num_instance_images)
self.flip = transforms.RandomHorizontalFlip(0.5 * hflip)
self.image_transforms = transforms.Compose(
[
self.flip,
transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def __len__(self):
return self._length
def preprocess(self, image, scale, resample):
outer, inner = self.size, scale
factor = self.size // self.mask_size
if scale > self.size:
outer, inner = scale, self.size
top, left = np.random.randint(0, outer - inner + 1), np.random.randint(0, outer - inner + 1)
image = image.resize((scale, scale), resample=resample)
image = np.array(image).astype(np.uint8)
image = (image / 127.5 - 1.0).astype(np.float32)
instance_image = np.zeros((self.size, self.size, 3), dtype=np.float32)
mask = np.zeros((self.size // factor, self.size // factor))
if scale > self.size:
instance_image = image[top : top + inner, left : left + inner, :]
mask = np.ones((self.size // factor, self.size // factor))
else:
instance_image[top : top + inner, left : left + inner, :] = image
mask[
top // factor + 1 : (top + scale) // factor - 1, left // factor + 1 : (left + scale) // factor - 1
] = 1.0
return instance_image, mask
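# Editor's note (illustration with hypothetical values): `preprocess` pastes the resized image into a
# size x size canvas and marks the valid region in a downscaled mask. For size=512, mask_size=64
# (factor 8) and scale=256, the image occupies a random 256x256 patch of the canvas and the mask is
# 1.0 only on the matching (roughly 30x30) interior block of the 64x64 grid; for scale > 512 a random
# 512x512 crop of the enlarged image is taken instead and the whole mask is 1.0.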
def __getitem__(self, index):
example = {}
instance_image, instance_prompt = self.instance_images_path[index % self.num_instance_images]
instance_image = Image.open(instance_image)
if not instance_image.mode == "RGB":
instance_image = instance_image.convert("RGB")
instance_image = self.flip(instance_image)
# apply resize augmentation and create a valid image region mask
random_scale = self.size
if self.aug:
random_scale = (
np.random.randint(self.size // 3, self.size + 1)
if np.random.uniform() < 0.66
else np.random.randint(int(1.2 * self.size), int(1.4 * self.size))
)
instance_image, mask = self.preprocess(instance_image, random_scale, self.interpolation)
if random_scale < 0.6 * self.size:
instance_prompt = np.random.choice(["a far away ", "very small "]) + instance_prompt
elif random_scale > self.size:
instance_prompt = np.random.choice(["zoomed in ", "close up "]) + instance_prompt
example["instance_images"] = torch.from_numpy(instance_image).permute(2, 0, 1)
example["mask"] = torch.from_numpy(mask)
example["instance_prompt_ids"] = self.tokenizer(
instance_prompt,
truncation=True,
padding="max_length",
max_length=self.tokenizer.model_max_length,
return_tensors="pt",
).input_ids
if self.with_prior_preservation:
class_image, class_prompt = self.class_images_path[index % self.num_class_images]
class_image = Image.open(class_image)
if not class_image.mode == "RGB":
class_image = class_image.convert("RGB")
example["class_images"] = self.image_transforms(class_image)
example["class_mask"] = torch.ones_like(example["mask"])
example["class_prompt_ids"] = self.tokenizer(
class_prompt,
truncation=True,
padding="max_length",
max_length=self.tokenizer.model_max_length,
return_tensors="pt",
).input_ids
return example
def save_new_embed(text_encoder, modifier_token_id, accelerator, args, output_dir, safe_serialization=True):
"""Saves the new token embeddings from the text encoder."""
logger.info("Saving embeddings")
learned_embeds = accelerator.unwrap_model(text_encoder).get_input_embeddings().weight
for x, y in zip(modifier_token_id, args.modifier_token):
learned_embeds_dict = {}
learned_embeds_dict[y] = learned_embeds[x]
filename = f"{output_dir}/{y}.bin"
if safe_serialization:
safetensors.torch.save_file(learned_embeds_dict, filename, metadata={"format": "pt"})
else:
torch.save(learned_embeds_dict, filename)
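# Editor's note (hedged sketch with assumed file names, not part of the training script): an embedding
# saved above as `<output_dir>/<modifier_token>.bin` can be loaded back for inspection, e.g.
#
#   import safetensors.torch, torch
#   learned = safetensors.torch.load_file("custom-diffusion-model/<modifier_token>.bin")
#   # or, when --no_safe_serialization was used:
#   learned = torch.load("custom-diffusion-model/<modifier_token>.bin")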
def parse_args(input_args=None):
parser = argparse.ArgumentParser(description="Custom Diffusion training script.")
parser.add_argument(
"--pretrained_model_name_or_path",
type=str,
default=None,
required=True,
help="Path to pretrained model or model identifier from huggingface.co/models.",
)
parser.add_argument(
"--revision",
type=str,
default=None,
required=False,
help="Revision of pretrained model identifier from huggingface.co/models.",
)
parser.add_argument(
"--variant",
type=str,
default=None,
help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16",
)
parser.add_argument(
"--tokenizer_name",
type=str,
default=None,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--instance_data_dir",
type=str,
default=None,
help="A folder containing the training data of instance images.",
)
parser.add_argument(
"--class_data_dir",
type=str,
default=None,
help="A folder containing the training data of class images.",
)
parser.add_argument(
"--instance_prompt",
type=str,
default=None,
help="The prompt with identifier specifying the instance",
)
parser.add_argument(
"--class_prompt",
type=str,
default=None,
help="The prompt to specify images in the same class as provided instance images.",
)
parser.add_argument(
"--validation_prompt",
type=str,
default=None,
help="A prompt that is used during validation to verify that the model is learning.",
)
parser.add_argument(
"--num_validation_images",
type=int,
default=2,
help="Number of images that should be generated during validation with `validation_prompt`.",
)
parser.add_argument(
"--validation_steps",
type=int,
default=50,
help=(
"Run dreambooth validation every X epochs. Dreambooth validation consists of running the prompt"
" `args.validation_prompt` multiple times: `args.num_validation_images`."
),
)
parser.add_argument(
"--with_prior_preservation",
default=False,
action="store_true",
help="Flag to add prior preservation loss.",
)
parser.add_argument(
"--real_prior",
default=False,
action="store_true",
help="real images as prior.",
)
parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.")
parser.add_argument(
"--num_class_images",
type=int,
default=200,
help=(
"Minimal class images for prior preservation loss. If there are not enough images already present in"
" class_data_dir, additional images will be sampled with class_prompt."
),
)
parser.add_argument(
"--output_dir",
type=str,
default="custom-diffusion-model",
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument("--seed", type=int, default=42, help="A seed for reproducible training.")
parser.add_argument(
"--resolution",
type=int,
default=512,
help=(
"The resolution for input images, all the images in the train/validation dataset will be resized to this"
" resolution"
),
)
parser.add_argument(
"--center_crop",
default=False,
action="store_true",
help=(
"Whether to center crop the input images to the resolution. If not set, the images will be randomly"
" cropped. The images will be resized to the resolution first before cropping."
),
)
parser.add_argument(
"--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
)
parser.add_argument(
"--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images."
)
parser.add_argument("--num_train_epochs", type=int, default=1)
parser.add_argument(
"--max_train_steps",
type=int,
default=None,
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
)
parser.add_argument(
"--checkpointing_steps",
type=int,
default=250,
help=(
"Save a checkpoint of the training state every X updates. These checkpoints can be used both as final"
" checkpoints in case they are better than the last checkpoint, and are also suitable for resuming"
" training using `--resume_from_checkpoint`."
),
)
parser.add_argument(
"--checkpoints_total_limit",
type=int,
default=None,
help=("Max number of checkpoints to store."),
)
parser.add_argument(
"--resume_from_checkpoint",
type=str,
default=None,
help=(
"Whether training should be resumed from a previous checkpoint. Use a path saved by"
' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
),
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--gradient_checkpointing",
action="store_true",
help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=1e-5,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument(
"--scale_lr",
action="store_true",
default=False,
help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
)
parser.add_argument(
"--dataloader_num_workers",
type=int,
default=2,
help=(
"Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
),
)
parser.add_argument(
"--freeze_model",
type=str,
default="crossattn_kv",
choices=["crossattn_kv", "crossattn"],
help="crossattn to enable fine-tuning of all params in the cross attention",
)
parser.add_argument(
"--lr_scheduler",
type=str,
default="constant",
help=(
'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
' "constant", "constant_with_warmup"]'
),
)
parser.add_argument(
"--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
)
parser.add_argument(
"--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
)
parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
parser.add_argument(
"--hub_model_id",
type=str,
default=None,
help="The name of the repository to keep in sync with the local `output_dir`.",
)
parser.add_argument(
"--logging_dir",
type=str,
default="logs",
help=(
"[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
" *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
),
)
parser.add_argument(
"--allow_tf32",
action="store_true",
help=(
"Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
" https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
),
)
parser.add_argument(
"--report_to",
type=str,
default="tensorboard",
help=(
'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
),
)
parser.add_argument(
"--mixed_precision",
type=str,
default=None,
choices=["no", "fp16", "bf16"],
help=(
"Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
" 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
" flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
),
)
parser.add_argument(
"--prior_generation_precision",
type=str,
default=None,
choices=["no", "fp32", "fp16", "bf16"],
help=(
"Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
" 1.10.and an Nvidia Ampere GPU. Default to fp16 if a GPU is available else fp32."
),
)
parser.add_argument(
"--concepts_list",
type=str,
default=None,
help="Path to json containing multiple concepts, will overwrite parameters like instance_prompt, class_prompt, etc.",
)
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
parser.add_argument(
"--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
)
parser.add_argument(
"--set_grads_to_none",
action="store_true",
help=(
"Save more memory by using setting grads to None instead of zero. Be aware, that this changes certain"
" behaviors, so disable this argument if it causes any problems. More info:"
" https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html"
),
)
parser.add_argument(
"--modifier_token",
type=str,
default=None,
help="A token to use as a modifier for the concept.",
)
parser.add_argument(
"--initializer_token", type=str, default="ktn+pll+ucd", help="A token to use as initializer word."
)
parser.add_argument("--hflip", action="store_true", help="Apply horizontal flip data augmentation.")
parser.add_argument(
"--noaug",
action="store_true",
help="Dont apply augmentation during data augmentation when this flag is enabled.",
)
parser.add_argument(
"--no_safe_serialization",
action="store_true",
help="If specified save the checkpoint not in `safetensors` format, but in original PyTorch format instead.",
)
if input_args is not None:
args = parser.parse_args(input_args)
else:
args = parser.parse_args()
env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
if env_local_rank != -1 and env_local_rank != args.local_rank:
args.local_rank = env_local_rank
if args.with_prior_preservation:
if args.concepts_list is None:
if args.class_data_dir is None:
raise ValueError("You must specify a data directory for class images.")
if args.class_prompt is None:
raise ValueError("You must specify prompt for class images.")
else:
# logger is not available yet
if args.class_data_dir is not None:
warnings.warn("You need not use --class_data_dir without --with_prior_preservation.")
if args.class_prompt is not None:
warnings.warn("You need not use --class_prompt without --with_prior_preservation.")
return args
def main(args):
if args.report_to == "wandb" and args.hub_token is not None:
raise ValueError(
"You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token."
" Please use `huggingface-cli login` to authenticate with the Hub."
)
logging_dir = Path(args.output_dir, args.logging_dir)
accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
accelerator = Accelerator(
gradient_accumulation_steps=args.gradient_accumulation_steps,
mixed_precision=args.mixed_precision,
log_with=args.report_to,
project_config=accelerator_project_config,
)
if args.report_to == "wandb":
if not is_wandb_available():
raise ImportError("Make sure to install wandb if you want to use it for logging during training.")
import wandb
# Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate
# This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models.
# TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate.
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state, main_process_only=False)
if accelerator.is_local_main_process:
transformers.utils.logging.set_verbosity_warning()
diffusers.utils.logging.set_verbosity_info()
else:
transformers.utils.logging.set_verbosity_error()
diffusers.utils.logging.set_verbosity_error()
# We need to initialize the trackers we use, and also store our configuration.
# The trackers are initialized automatically on the main process.
if accelerator.is_main_process:
accelerator.init_trackers("custom-diffusion", config=vars(args))
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
if args.concepts_list is None:
args.concepts_list = [
{
"instance_prompt": args.instance_prompt,
"class_prompt": args.class_prompt,
"instance_data_dir": args.instance_data_dir,
"class_data_dir": args.class_data_dir,
}
]
else:
with open(args.concepts_list, "r") as f:
args.concepts_list = json.load(f)
# Generate class images if prior preservation is enabled.
if args.with_prior_preservation:
for i, concept in enumerate(args.concepts_list):
class_images_dir = Path(concept["class_data_dir"])
if not class_images_dir.exists():
class_images_dir.mkdir(parents=True, exist_ok=True)
if args.real_prior:
assert (
class_images_dir / "images"
).exists(), f"Please run: python retrieve.py --class_prompt \"{concept['class_prompt']}\" --class_data_dir {class_images_dir} --num_class_images {args.num_class_images}"
assert (
len(list((class_images_dir / "images").iterdir())) == args.num_class_images
), f"Please run: python retrieve.py --class_prompt \"{concept['class_prompt']}\" --class_data_dir {class_images_dir} --num_class_images {args.num_class_images}"
assert (
class_images_dir / "caption.txt"
).exists(), f"Please run: python retrieve.py --class_prompt \"{concept['class_prompt']}\" --class_data_dir {class_images_dir} --num_class_images {args.num_class_images}"
assert (
class_images_dir / "images.txt"
).exists(), f"Please run: python retrieve.py --class_prompt \"{concept['class_prompt']}\" --class_data_dir {class_images_dir} --num_class_images {args.num_class_images}"
concept["class_prompt"] = os.path.join(class_images_dir, "caption.txt")
concept["class_data_dir"] = os.path.join(class_images_dir, "images.txt")
args.concepts_list[i] = concept
accelerator.wait_for_everyone()
else:
cur_class_images = len(list(class_images_dir.iterdir()))
if cur_class_images < args.num_class_images:
torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32
if args.prior_generation_precision == "fp32":
torch_dtype = torch.float32
elif args.prior_generation_precision == "fp16":
torch_dtype = torch.float16
elif args.prior_generation_precision == "bf16":
torch_dtype = torch.bfloat16
pipeline = DiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path,
torch_dtype=torch_dtype,
safety_checker=None,
revision=args.revision,
variant=args.variant,
)
pipeline.set_progress_bar_config(disable=True)
num_new_images = args.num_class_images - cur_class_images
logger.info(f"Number of class images to sample: {num_new_images}.")
sample_dataset = PromptDataset(concept["class_prompt"], num_new_images)
sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size)
sample_dataloader = accelerator.prepare(sample_dataloader)
pipeline.to(accelerator.device)
for example in tqdm(
sample_dataloader,
desc="Generating class images",
disable=not accelerator.is_local_main_process,
):
images = pipeline(example["prompt"]).images
for i, image in enumerate(images):
hash_image = insecure_hashlib.sha1(image.tobytes()).hexdigest()
image_filename = (
class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg"
)
image.save(image_filename)
del pipeline
if torch.cuda.is_available():
torch.cuda.empty_cache()
# Handle the repository creation
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
if args.push_to_hub:
repo_id = create_repo(
repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
).repo_id
# Load the tokenizer
if args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(
args.tokenizer_name,
revision=args.revision,
use_fast=False,
)
elif args.pretrained_model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(
args.pretrained_model_name_or_path,
subfolder="tokenizer",
revision=args.revision,
use_fast=False,
)
# import correct text encoder class
text_encoder_cls = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision)
# Load scheduler and models
noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
text_encoder = text_encoder_cls.from_pretrained(
args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, variant=args.variant
)
vae = AutoencoderKL.from_pretrained(
args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision, variant=args.variant
)
unet = UNet2DConditionModel.from_pretrained(
args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, variant=args.variant
)
# Adding a modifier token which is optimized ####
# Code taken from https://github.com/huggingface/diffusers/blob/main/examples/textual_inversion/textual_inversion.py
modifier_token_id = []
initializer_token_id = []
if args.modifier_token is not None:
args.modifier_token = args.modifier_token.split("+")
args.initializer_token = args.initializer_token.split("+")
if len(args.modifier_token) > len(args.initializer_token):
raise ValueError("You must specify + separated initializer token for each modifier token.")
for modifier_token, initializer_token in zip(
args.modifier_token, args.initializer_token[: len(args.modifier_token)]
):
# Add the placeholder token in tokenizer
num_added_tokens = tokenizer.add_tokens(modifier_token)
if num_added_tokens == 0:
raise ValueError(
f"The tokenizer already contains the token {modifier_token}. Please pass a different"
" `modifier_token` that is not already in the tokenizer."
)
# Convert the initializer_token, placeholder_token to ids
token_ids = tokenizer.encode([initializer_token], add_special_tokens=False)
print(token_ids)
# Check if initializer_token is a single token or a sequence of tokens
if len(token_ids) > 1:
raise ValueError("The initializer token must be a single token.")
initializer_token_id.append(token_ids[0])
modifier_token_id.append(tokenizer.convert_tokens_to_ids(modifier_token))
# Resize the token embeddings as we are adding new special tokens to the tokenizer
text_encoder.resize_token_embeddings(len(tokenizer))
# Initialise the newly added placeholder token with the embeddings of the initializer token
token_embeds = text_encoder.get_input_embeddings().weight.data
for x, y in zip(modifier_token_id, initializer_token_id):
token_embeds[x] = token_embeds[y]
# Freeze all parameters except for the token embeddings in text encoder
params_to_freeze = itertools.chain(
text_encoder.text_model.encoder.parameters(),
text_encoder.text_model.final_layer_norm.parameters(),
text_encoder.text_model.embeddings.position_embedding.parameters(),
)
freeze_params(params_to_freeze)
########################################################
########################################################
vae.requires_grad_(False)
if args.modifier_token is None:
text_encoder.requires_grad_(False)
unet.requires_grad_(False)
# For mixed precision training we cast the text_encoder and vae weights to half-precision
# as these models are only used for inference; keeping weights in full precision is not required.
weight_dtype = torch.float32
if accelerator.mixed_precision == "fp16":
weight_dtype = torch.float16
elif accelerator.mixed_precision == "bf16":
weight_dtype = torch.bfloat16
# Move unet, vae and text_encoder to device and cast to weight_dtype
if accelerator.mixed_precision != "fp16" and args.modifier_token is not None:
text_encoder.to(accelerator.device, dtype=weight_dtype)
unet.to(accelerator.device, dtype=weight_dtype)
vae.to(accelerator.device, dtype=weight_dtype)
attention_class = (
CustomDiffusionAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else CustomDiffusionAttnProcessor
)
if args.enable_xformers_memory_efficient_attention:
if is_xformers_available():
import xformers
xformers_version = version.parse(xformers.__version__)
if xformers_version == version.parse("0.0.16"):
logger.warning(
"xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
)
attention_class = CustomDiffusionXFormersAttnProcessor
else:
raise ValueError("xformers is not available. Make sure it is installed correctly")
# now we will add new Custom Diffusion weights to the attention layers
# It's important to realize here how many attention weights will be added and of which sizes
# The sizes of the attention layers consist only of two different variables:
# 1) - the "hidden_size", which is increased according to `unet.config.block_out_channels`.
# 2) - the "cross attention size", which is set to `unet.config.cross_attention_dim`.
# Let's first see how many attention processors we will have to set.
# For Stable Diffusion, it should be equal to:
# - down blocks (2x attention layers) * (2x transformer layers) * (3x down blocks) = 12
# - mid blocks (2x attention layers) * (1x transformer layers) * (1x mid blocks) = 2
# - up blocks (2x attention layers) * (3x transformer layers) * (3x down blocks) = 18
# => 32 layers
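# Editor's note (illustrative check, assuming a standard SD 1.x UNet): the count above can be verified
# directly from the loaded model, e.g.
#
#   n_total = len(unet.attn_processors)                                                  # 32
#   n_cross = sum(1 for n in unet.attn_processors if not n.endswith("attn1.processor"))  # 16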
# Only train key/value projection layers if freeze_model == 'crossattn_kv'; otherwise train all params in the cross-attention layers
train_kv = True
train_q_out = False if args.freeze_model == "crossattn_kv" else True
custom_diffusion_attn_procs = {}
st = unet.state_dict()
for name, _ in unet.attn_processors.items():
cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim
if name.startswith("mid_block"):
hidden_size = unet.config.block_out_channels[-1]
elif name.startswith("up_blocks"):
block_id = int(name[len("up_blocks.")])
hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
elif name.startswith("down_blocks"):
block_id = int(name[len("down_blocks.")])
hidden_size = unet.config.block_out_channels[block_id]
layer_name = name.split(".processor")[0]
weights = {
"to_k_custom_diffusion.weight": st[layer_name + ".to_k.weight"],
"to_v_custom_diffusion.weight": st[layer_name + ".to_v.weight"],
}
if train_q_out:
weights["to_q_custom_diffusion.weight"] = st[layer_name + ".to_q.weight"]
weights["to_out_custom_diffusion.0.weight"] = st[layer_name + ".to_out.0.weight"]
weights["to_out_custom_diffusion.0.bias"] = st[layer_name + ".to_out.0.bias"]
if cross_attention_dim is not None:
custom_diffusion_attn_procs[name] = attention_class(
train_kv=train_kv,
train_q_out=train_q_out,
hidden_size=hidden_size,
cross_attention_dim=cross_attention_dim,
).to(unet.device)
custom_diffusion_attn_procs[name].load_state_dict(weights)
else:
custom_diffusion_attn_procs[name] = attention_class(
train_kv=False,
train_q_out=False,
hidden_size=hidden_size,
cross_attention_dim=cross_attention_dim,
)
del st
unet.set_attn_processor(custom_diffusion_attn_procs)
custom_diffusion_layers = AttnProcsLayers(unet.attn_processors)
accelerator.register_for_checkpointing(custom_diffusion_layers)
if args.gradient_checkpointing:
unet.enable_gradient_checkpointing()
if args.modifier_token is not None:
text_encoder.gradient_checkpointing_enable()
# Enable TF32 for faster training on Ampere GPUs,
# cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
if args.allow_tf32:
torch.backends.cuda.matmul.allow_tf32 = True
if args.scale_lr:
args.learning_rate = (
args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
)
if args.with_prior_preservation:
args.learning_rate = args.learning_rate * 2.0
# Use 8-bit Adam for lower memory usage or to fine-tune the model on 16GB GPUs
if args.use_8bit_adam:
try:
import bitsandbytes as bnb
except ImportError:
raise ImportError(
"To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
)
optimizer_class = bnb.optim.AdamW8bit
else:
optimizer_class = torch.optim.AdamW
# Optimizer creation
optimizer = optimizer_class(
itertools.chain(text_encoder.get_input_embeddings().parameters(), custom_diffusion_layers.parameters())
if args.modifier_token is not None
else custom_diffusion_layers.parameters(),
lr=args.learning_rate,
betas=(args.adam_beta1, args.adam_beta2),
weight_decay=args.adam_weight_decay,
eps=args.adam_epsilon,
)
# Dataset and DataLoaders creation:
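# mask_size below is the spatial size of the VAE latents (resolution // 8 for the Stable Diffusion VAE),
# obtained by encoding a single dummy image once.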
train_dataset = CustomDiffusionDataset(
concepts_list=args.concepts_list,
tokenizer=tokenizer,
with_prior_preservation=args.with_prior_preservation,
size=args.resolution,
mask_size=vae.encode(
torch.randn(1, 3, args.resolution, args.resolution).to(dtype=weight_dtype).to(accelerator.device)
)
.latent_dist.sample()
.size()[-1],
center_crop=args.center_crop,
num_class_images=args.num_class_images,
hflip=args.hflip,
aug=not args.noaug,
)
train_dataloader = torch.utils.data.DataLoader(
train_dataset,
batch_size=args.train_batch_size,
shuffle=True,
collate_fn=lambda examples: collate_fn(examples, args.with_prior_preservation),
num_workers=args.dataloader_num_workers,
)
# Scheduler and math around the number of training steps.
overrode_max_train_steps = False
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
overrode_max_train_steps = True
lr_scheduler = get_scheduler(
args.lr_scheduler,
optimizer=optimizer,
num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
num_training_steps=args.max_train_steps * accelerator.num_processes,
)
# Prepare everything with our `accelerator`.
if args.modifier_token is not None:
custom_diffusion_layers, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
custom_diffusion_layers, text_encoder, optimizer, train_dataloader, lr_scheduler
)
else:
custom_diffusion_layers, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
custom_diffusion_layers, optimizer, train_dataloader, lr_scheduler
)
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if overrode_max_train_steps:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
# Afterwards we recalculate our number of training epochs
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
# Train!
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num batches each epoch = {len(train_dataloader)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
global_step = 0
first_epoch = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint != "latest":
path = os.path.basename(args.resume_from_checkpoint)
else:
# Get the most recent checkpoint
dirs = os.listdir(args.output_dir)
dirs = [d for d in dirs if d.startswith("checkpoint")]
dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
path = dirs[-1] if len(dirs) > 0 else None
if path is None:
accelerator.print(
f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
)
args.resume_from_checkpoint = None
initial_global_step = 0
else:
accelerator.print(f"Resuming from checkpoint {path}")
accelerator.load_state(os.path.join(args.output_dir, path))
global_step = int(path.split("-")[1])
initial_global_step = global_step
first_epoch = global_step // num_update_steps_per_epoch
else:
initial_global_step = 0
progress_bar = tqdm(
range(0, args.max_train_steps),
initial=initial_global_step,
desc="Steps",
# Only show the progress bar once on each machine.
disable=not accelerator.is_local_main_process,
)
for epoch in range(first_epoch, args.num_train_epochs):
unet.train()
if args.modifier_token is not None:
text_encoder.train()
for step, batch in enumerate(train_dataloader):
with accelerator.accumulate(unet), accelerator.accumulate(text_encoder):
# Convert images to latent space
latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample()
latents = latents * vae.config.scaling_factor
# Sample noise that we'll add to the latents
noise = torch.randn_like(latents)
bsz = latents.shape[0]
# Sample a random timestep for each image
timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
timesteps = timesteps.long()
# Add noise to the latents according to the noise magnitude at each timestep
# (this is the forward diffusion process)
noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
# Get the text embedding for conditioning
encoder_hidden_states = text_encoder(batch["input_ids"])[0]
# Predict the noise residual
model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample
# Get the target for loss depending on the prediction type
if noise_scheduler.config.prediction_type == "epsilon":
target = noise
elif noise_scheduler.config.prediction_type == "v_prediction":
target = noise_scheduler.get_velocity(latents, noise, timesteps)
else:
raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
if args.with_prior_preservation:
# Chunk the noise and model_pred into two parts and compute the loss on each part separately.
model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0)
target, target_prior = torch.chunk(target, 2, dim=0)
mask = torch.chunk(batch["mask"], 2, dim=0)[0]
# Compute instance loss
loss = F.mse_loss(model_pred.float(), target.float(), reduction="none")
loss = ((loss * mask).sum([1, 2, 3]) / mask.sum([1, 2, 3])).mean()
# Compute prior loss
prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean")
# Add the prior loss to the instance loss.
loss = loss + args.prior_loss_weight * prior_loss
else:
mask = batch["mask"]
loss = F.mse_loss(model_pred.float(), target.float(), reduction="none")
loss = ((loss * mask).sum([1, 2, 3]) / mask.sum([1, 2, 3])).mean()
accelerator.backward(loss)
# Zero out the gradients for all token embeddings except the newly added
# embeddings for the concept, as we only want to optimize the concept embeddings
if args.modifier_token is not None:
if accelerator.num_processes > 1:
grads_text_encoder = text_encoder.module.get_input_embeddings().weight.grad
else:
grads_text_encoder = text_encoder.get_input_embeddings().weight.grad
# Get the index for tokens that we want to zero the grads for
index_grads_to_zero = torch.arange(len(tokenizer)) != modifier_token_id[0]
for i in range(1, len(modifier_token_id)):
index_grads_to_zero = index_grads_to_zero & (
torch.arange(len(tokenizer)) != modifier_token_id[i]
)
grads_text_encoder.data[index_grads_to_zero, :] = grads_text_encoder.data[
index_grads_to_zero, :
].fill_(0)
if accelerator.sync_gradients:
params_to_clip = (
itertools.chain(text_encoder.parameters(), custom_diffusion_layers.parameters())
if args.modifier_token is not None
else custom_diffusion_layers.parameters()
)
accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad(set_to_none=args.set_grads_to_none)
# Checks if the accelerator has performed an optimization step behind the scenes
if accelerator.sync_gradients:
progress_bar.update(1)
global_step += 1
if global_step % args.checkpointing_steps == 0:
if accelerator.is_main_process:
# _before_ saving state, check if this save would set us over the `checkpoints_total_limit`
if args.checkpoints_total_limit is not None:
checkpoints = os.listdir(args.output_dir)
checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))
# before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints
if len(checkpoints) >= args.checkpoints_total_limit:
num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
removing_checkpoints = checkpoints[0:num_to_remove]
logger.info(
f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints"
)
logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")
for removing_checkpoint in removing_checkpoints:
removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
shutil.rmtree(removing_checkpoint)
save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
accelerator.save_state(save_path)
logger.info(f"Saved state to {save_path}")
logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
progress_bar.set_postfix(**logs)
accelerator.log(logs, step=global_step)
if global_step >= args.max_train_steps:
break
if accelerator.is_main_process:
images = []
if args.validation_prompt is not None and global_step % args.validation_steps == 0:
logger.info(
f"Running validation... \n Generating {args.num_validation_images} images with prompt:"
f" {args.validation_prompt}."
)
# create pipeline
pipeline = DiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path,
unet=accelerator.unwrap_model(unet),
text_encoder=accelerator.unwrap_model(text_encoder),
tokenizer=tokenizer,
revision=args.revision,
variant=args.variant,
torch_dtype=weight_dtype,
)
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
pipeline = pipeline.to(accelerator.device)
pipeline.set_progress_bar_config(disable=True)
# run inference
generator = torch.Generator(device=accelerator.device).manual_seed(args.seed)
images = [
pipeline(args.validation_prompt, num_inference_steps=25, generator=generator, eta=1.0).images[
0
]
for _ in range(args.num_validation_images)
]
for tracker in accelerator.trackers:
if tracker.name == "tensorboard":
np_images = np.stack([np.asarray(img) for img in images])
tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC")
if tracker.name == "wandb":
tracker.log(
{
"validation": [
wandb.Image(image, caption=f"{i}: {args.validation_prompt}")
for i, image in enumerate(images)
]
}
)
del pipeline
torch.cuda.empty_cache()
# Save the custom diffusion layers
accelerator.wait_for_everyone()
if accelerator.is_main_process:
unet = unet.to(torch.float32)
unet.save_attn_procs(args.output_dir, safe_serialization=not args.no_safe_serialization)
save_new_embed(
text_encoder,
modifier_token_id,
accelerator,
args,
args.output_dir,
safe_serialization=not args.no_safe_serialization,
)
# Final inference
# Load previous pipeline
pipeline = DiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, revision=args.revision, variant=args.variant, torch_dtype=weight_dtype
)
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
pipeline = pipeline.to(accelerator.device)
# load attention processors
weight_name = (
"pytorch_custom_diffusion_weights.safetensors"
if not args.no_safe_serialization
else "pytorch_custom_diffusion_weights.bin"
)
pipeline.unet.load_attn_procs(args.output_dir, weight_name=weight_name)
for token in args.modifier_token:
token_weight_name = f"{token}.safetensors" if not args.no_safe_serialization else f"{token}.bin"
pipeline.load_textual_inversion(args.output_dir, weight_name=token_weight_name)
# run inference
if args.validation_prompt and args.num_validation_images > 0:
generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed else None
images = [
pipeline(args.validation_prompt, num_inference_steps=25, generator=generator, eta=1.0).images[0]
for _ in range(args.num_validation_images)
]
for tracker in accelerator.trackers:
if tracker.name == "tensorboard":
np_images = np.stack([np.asarray(img) for img in images])
tracker.writer.add_images("test", np_images, epoch, dataformats="NHWC")
if tracker.name == "wandb":
tracker.log(
{
"test": [
wandb.Image(image, caption=f"{i}: {args.validation_prompt}")
for i, image in enumerate(images)
]
}
)
if args.push_to_hub:
save_model_card(
repo_id,
images=images,
base_model=args.pretrained_model_name_or_path,
prompt=args.instance_prompt,
repo_folder=args.output_dir,
)
api = HfApi(token=args.hub_token)
api.upload_folder(
repo_id=repo_id,
folder_path=args.output_dir,
commit_message="End of training",
ignore_patterns=["step_*", "epoch_*"],
)
accelerator.end_training()
if __name__ == "__main__":
args = parse_args()
main(args)
| diffusers/examples/custom_diffusion/train_custom_diffusion.py/0 | {
"file_path": "diffusers/examples/custom_diffusion/train_custom_diffusion.py",
"repo_id": "diffusers",
"token_count": 26609
} | 111 |
# InstructPix2Pix training example
[InstructPix2Pix](https://arxiv.org/abs/2211.09800) is a method to fine-tune text-conditioned diffusion models such that they can follow an edit instruction for an input image. Models fine-tuned using this method take the following as inputs:
<p align="center">
<img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/evaluation_diffusion_models/edit-instruction.png" alt="instructpix2pix-inputs" width=600/>
</p>
The output is an "edited" image that reflects the edit instruction applied on the input image:
<p align="center">
<img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/output-gs%407-igs%401-steps%4050.png" alt="instructpix2pix-output" width=600/>
</p>
The `train_instruct_pix2pix.py` script shows how to implement the training procedure and adapt it for Stable Diffusion.
***Disclaimer: Even though `train_instruct_pix2pix.py` implements the InstructPix2Pix
training procedure while remaining faithful to the [original implementation](https://github.com/timothybrooks/instruct-pix2pix), we have only tested it on a [small-scale dataset](https://huggingface.co/datasets/fusing/instructpix2pix-1000-samples). This can impact the end results. For better results, we recommend longer training runs with a larger dataset. [Here](https://huggingface.co/datasets/timbrooks/instructpix2pix-clip-filtered) you can find a large dataset for InstructPix2Pix training.***
## Running locally with PyTorch
### Installing the dependencies
Before running the scripts, make sure to install the library's training dependencies:
**Important**
To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:
```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install -e .
```
Then cd into the example folder and run
```bash
pip install -r requirements.txt
```
And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:
```bash
accelerate config
```
Or for a default accelerate configuration without answering questions about your environment
```bash
accelerate config default
```
Or, if your environment doesn't support an interactive shell (e.g. a notebook):
```python
from accelerate.utils import write_basic_config
write_basic_config()
```
### Toy example
As mentioned before, we'll use a [small toy dataset](https://huggingface.co/datasets/fusing/instructpix2pix-1000-samples) for training. The dataset
is a smaller version of the [original dataset](https://huggingface.co/datasets/timbrooks/instructpix2pix-clip-filtered) used in the InstructPix2Pix paper.
Configure environment variables such as the dataset identifier and the Stable Diffusion
checkpoint:
```bash
export MODEL_NAME="runwayml/stable-diffusion-v1-5"
export DATASET_ID="fusing/instructpix2pix-1000-samples"
```
Now, we can launch training:
```bash
accelerate launch --mixed_precision="fp16" train_instruct_pix2pix.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--dataset_name=$DATASET_ID \
--enable_xformers_memory_efficient_attention \
--resolution=256 --random_flip \
--train_batch_size=4 --gradient_accumulation_steps=4 --gradient_checkpointing \
--max_train_steps=15000 \
--checkpointing_steps=5000 --checkpoints_total_limit=1 \
--learning_rate=5e-05 --max_grad_norm=1 --lr_warmup_steps=0 \
--conditioning_dropout_prob=0.05 \
--mixed_precision=fp16 \
--seed=42 \
--push_to_hub
```
Additionally, we support performing validation inference to monitor training progress
with Weights and Biases. You can enable this feature with `report_to="wandb"`:
```bash
accelerate launch --mixed_precision="fp16" train_instruct_pix2pix.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--dataset_name=$DATASET_ID \
--enable_xformers_memory_efficient_attention \
--resolution=256 --random_flip \
--train_batch_size=4 --gradient_accumulation_steps=4 --gradient_checkpointing \
--max_train_steps=15000 \
--checkpointing_steps=5000 --checkpoints_total_limit=1 \
--learning_rate=5e-05 --max_grad_norm=1 --lr_warmup_steps=0 \
--conditioning_dropout_prob=0.05 \
--mixed_precision=fp16 \
--val_image_url="https://hf.co/datasets/diffusers/diffusers-images-docs/resolve/main/mountain.png" \
--validation_prompt="make the mountains snowy" \
--seed=42 \
--report_to=wandb \
--push_to_hub
```
We recommend this type of validation as it can be useful for model debugging. Note that you need `wandb` installed to use this. You can install `wandb` by running `pip install wandb`.
[Here](https://wandb.ai/sayakpaul/instruct-pix2pix/runs/ctr3kovq), you can find an example training run that includes some validation samples and the training hyperparameters.
***Note: In the original paper, the authors observed that even when the model is trained with an image resolution of 256x256, it generalizes well to bigger resolutions such as 512x512. This is likely because of the larger dataset they used during training.***
## Training with multiple GPUs
`accelerate` allows for seamless multi-GPU training. Follow the instructions [here](https://huggingface.co/docs/accelerate/basic_tutorials/launch)
for running distributed training with `accelerate`. Here is an example command:
```bash
accelerate launch --mixed_precision="fp16" --multi_gpu train_instruct_pix2pix.py \
--pretrained_model_name_or_path=runwayml/stable-diffusion-v1-5 \
--dataset_name=sayakpaul/instructpix2pix-1000-samples \
--use_ema \
--enable_xformers_memory_efficient_attention \
--resolution=512 --random_flip \
--train_batch_size=4 --gradient_accumulation_steps=4 --gradient_checkpointing \
--max_train_steps=15000 \
--checkpointing_steps=5000 --checkpoints_total_limit=1 \
--learning_rate=5e-05 --lr_warmup_steps=0 \
--conditioning_dropout_prob=0.05 \
--mixed_precision=fp16 \
--seed=42 \
--push_to_hub
```
## Inference
Once training is complete, we can perform inference:
```python
import PIL
import requests
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
model_id = "your_model_id" # <- replace this
pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
generator = torch.Generator("cuda").manual_seed(0)
url = "https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/test_pix2pix_4.png"
def download_image(url):
image = PIL.Image.open(requests.get(url, stream=True).raw)
image = PIL.ImageOps.exif_transpose(image)
image = image.convert("RGB")
return image
image = download_image(url)
prompt = "wipe out the lake"
num_inference_steps = 20
image_guidance_scale = 1.5
guidance_scale = 10
edited_image = pipe(prompt,
image=image,
num_inference_steps=num_inference_steps,
image_guidance_scale=image_guidance_scale,
guidance_scale=guidance_scale,
generator=generator,
).images[0]
edited_image.save("edited_image.png")
```
An example model repo obtained using this training script can be found
here - [sayakpaul/instruct-pix2pix](https://huggingface.co/sayakpaul/instruct-pix2pix).
We encourage you to play with the following three parameters to control
speed and quality during inference:
* `num_inference_steps`
* `image_guidance_scale`
* `guidance_scale`
Particularly, `image_guidance_scale` and `guidance_scale` can have a profound impact
on the generated ("edited") image (see [here](https://twitter.com/RisingSayak/status/1628392199196151808?s=20) for an example).
If you're looking for some interesting ways to use the InstructPix2Pix training methodology, we welcome you to check out this blog post: [Instruction-tuning Stable Diffusion with InstructPix2Pix](https://huggingface.co/blog/instruction-tuning-sd).
## Stable Diffusion XL
There's an equivalent `train_instruct_pix2pix_sdxl.py` script for [Stable Diffusion XL](https://huggingface.co/papers/2307.01952). Please refer to the docs [here](./README_sdxl.md) to learn more.
| diffusers/examples/instruct_pix2pix/README.md/0 | {
"file_path": "diffusers/examples/instruct_pix2pix/README.md",
"repo_id": "diffusers",
"token_count": 2738
} | 112 |
import torch
from diffusers import StableDiffusionPipeline
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
| diffusers/examples/research_projects/colossalai/inference.py/0 | {
"file_path": "diffusers/examples/research_projects/colossalai/inference.py",
"repo_id": "diffusers",
"token_count": 127
} | 113 |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2024 bram-w, The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import contextlib
import io
import logging
import math
import os
import shutil
from pathlib import Path
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import transformers
import wandb
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
from datasets import load_dataset
from huggingface_hub import create_repo, upload_folder
from packaging import version
from peft import LoraConfig
from peft.utils import get_peft_model_state_dict
from PIL import Image
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import AutoTokenizer, PretrainedConfig
import diffusers
from diffusers import (
AutoencoderKL,
DDPMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
UNet2DConditionModel,
)
from diffusers.loaders import LoraLoaderMixin
from diffusers.optimization import get_scheduler
from diffusers.utils import check_min_version, convert_state_dict_to_diffusers
from diffusers.utils.import_utils import is_xformers_available
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
check_min_version("0.25.0.dev0")
logger = get_logger(__name__)
VALIDATION_PROMPTS = [
"portrait photo of a girl, photograph, highly detailed face, depth of field, moody light, golden hour, style by Dan Winters, Russell James, Steve McCurry, centered, extremely detailed, Nikon D850, award winning photography",
"Self-portrait oil painting, a beautiful cyborg with golden hair, 8k",
"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
"A photo of beautiful mountain with realistic sunset and blue lake, highly detailed, masterpiece",
]
def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str, revision: str):
text_encoder_config = PretrainedConfig.from_pretrained(
pretrained_model_name_or_path,
subfolder="text_encoder",
revision=revision,
)
model_class = text_encoder_config.architectures[0]
if model_class == "CLIPTextModel":
from transformers import CLIPTextModel
return CLIPTextModel
else:
raise ValueError(f"{model_class} is not supported.")
def log_validation(args, unet, accelerator, weight_dtype, epoch, is_final_validation=False):
logger.info(f"Running validation... \n Generating images with prompts:\n" f" {VALIDATION_PROMPTS}.")
# create pipeline
pipeline = DiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path,
revision=args.revision,
variant=args.variant,
torch_dtype=weight_dtype,
)
if not is_final_validation:
pipeline.unet = accelerator.unwrap_model(unet)
else:
pipeline.load_lora_weights(args.output_dir, weight_name="pytorch_lora_weights.safetensors")
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
pipeline = pipeline.to(accelerator.device)
pipeline.set_progress_bar_config(disable=True)
# run inference
generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed else None
images = []
context = contextlib.nullcontext() if is_final_validation else torch.cuda.amp.autocast()
for prompt in VALIDATION_PROMPTS:
with context:
image = pipeline(prompt, num_inference_steps=25, generator=generator).images[0]
images.append(image)
tracker_key = "test" if is_final_validation else "validation"
for tracker in accelerator.trackers:
if tracker.name == "tensorboard":
np_images = np.stack([np.asarray(img) for img in images])
tracker.writer.add_images(tracker_key, np_images, epoch, dataformats="NHWC")
if tracker.name == "wandb":
tracker.log(
{
tracker_key: [
wandb.Image(image, caption=f"{i}: {VALIDATION_PROMPTS[i]}") for i, image in enumerate(images)
]
}
)
# Also log images without the LoRA params for comparison.
if is_final_validation:
pipeline.disable_lora()
no_lora_images = [
pipeline(prompt, num_inference_steps=25, generator=generator).images[0] for prompt in VALIDATION_PROMPTS
]
for tracker in accelerator.trackers:
if tracker.name == "tensorboard":
np_images = np.stack([np.asarray(img) for img in no_lora_images])
tracker.writer.add_images("test_without_lora", np_images, epoch, dataformats="NHWC")
if tracker.name == "wandb":
tracker.log(
{
"test_without_lora": [
wandb.Image(image, caption=f"{i}: {VALIDATION_PROMPTS[i]}")
for i, image in enumerate(no_lora_images)
]
}
)
def parse_args(input_args=None):
parser = argparse.ArgumentParser(description="Simple example of a training script.")
parser.add_argument(
"--pretrained_model_name_or_path",
type=str,
default=None,
required=True,
help="Path to pretrained model or model identifier from huggingface.co/models.",
)
parser.add_argument(
"--revision",
type=str,
default=None,
required=False,
help="Revision of pretrained model identifier from huggingface.co/models.",
)
parser.add_argument(
"--dataset_name",
type=str,
default=None,
help=(
"The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private,"
" dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
" or to a folder containing files that 🤗 Datasets can understand."
),
)
parser.add_argument(
"--dataset_split_name",
type=str,
default="validation",
help="Dataset split to be used during training. Helpful to specify for conducting experimental runs.",
)
parser.add_argument(
"--variant",
type=str,
default=None,
help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16",
)
parser.add_argument(
"--run_validation",
default=False,
action="store_true",
help="Whether to run validation inference in between training and also after training. Helps to track progress.",
)
parser.add_argument(
"--validation_steps",
type=int,
default=200,
help="Run validation every X steps.",
)
parser.add_argument(
"--max_train_samples",
type=int,
default=None,
help=(
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
),
)
parser.add_argument(
"--output_dir",
type=str,
default="diffusion-dpo-lora",
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument(
"--cache_dir",
type=str,
default=None,
help="The directory where the downloaded models and datasets will be stored.",
)
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
parser.add_argument(
"--resolution",
type=int,
default=512,
help=(
"The resolution for input images, all the images in the train/validation dataset will be resized to this"
" resolution"
),
)
parser.add_argument(
"--vae_encode_batch_size",
type=int,
default=8,
help="Batch size to use for VAE encoding of the images for efficient processing.",
)
parser.add_argument(
"--no_hflip",
action="store_true",
help="whether to randomly flip images horizontally",
)
parser.add_argument(
"--random_crop",
default=False,
action="store_true",
help=(
"Whether to random crop the input images to the resolution. If not set, the images will be center-cropped."
),
)
parser.add_argument(
"--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
)
parser.add_argument("--num_train_epochs", type=int, default=1)
parser.add_argument(
"--max_train_steps",
type=int,
default=None,
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
)
parser.add_argument(
"--checkpointing_steps",
type=int,
default=500,
help=(
"Save a checkpoint of the training state every X updates. These checkpoints can be used both as final"
" checkpoints in case they are better than the last checkpoint, and are also suitable for resuming"
" training using `--resume_from_checkpoint`."
),
)
parser.add_argument(
"--checkpoints_total_limit",
type=int,
default=None,
help=("Max number of checkpoints to store."),
)
parser.add_argument(
"--resume_from_checkpoint",
type=str,
default=None,
help=(
"Whether training should be resumed from a previous checkpoint. Use a path saved by"
' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
),
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--gradient_checkpointing",
action="store_true",
help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
)
parser.add_argument(
"--beta_dpo",
type=int,
default=2500,
help="DPO KL Divergence penalty.",
)
parser.add_argument(
"--loss_type",
type=str,
default="sigmoid",
help="DPO loss type. Can be one of 'sigmoid' (default), 'ipo', or 'cpo'",
)
parser.add_argument(
"--learning_rate",
type=float,
default=5e-4,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument(
"--scale_lr",
action="store_true",
default=False,
help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
)
parser.add_argument(
"--lr_scheduler",
type=str,
default="constant",
help=(
'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
' "constant", "constant_with_warmup"]'
),
)
parser.add_argument(
"--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
)
parser.add_argument(
"--lr_num_cycles",
type=int,
default=1,
help="Number of hard resets of the lr in cosine_with_restarts scheduler.",
)
parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.")
parser.add_argument(
"--dataloader_num_workers",
type=int,
default=0,
help=(
"Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
),
)
parser.add_argument(
"--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
)
parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
parser.add_argument(
"--hub_model_id",
type=str,
default=None,
help="The name of the repository to keep in sync with the local `output_dir`.",
)
parser.add_argument(
"--logging_dir",
type=str,
default="logs",
help=(
"[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
" *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
),
)
parser.add_argument(
"--allow_tf32",
action="store_true",
help=(
"Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
" https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
),
)
parser.add_argument(
"--report_to",
type=str,
default="tensorboard",
help=(
'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
),
)
parser.add_argument(
"--mixed_precision",
type=str,
default=None,
choices=["no", "fp16", "bf16"],
help=(
"Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
" 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
" flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
),
)
parser.add_argument(
"--prior_generation_precision",
type=str,
default=None,
choices=["no", "fp32", "fp16", "bf16"],
help=(
"Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
" 1.10.and an Nvidia Ampere GPU. Default to fp16 if a GPU is available else fp32."
),
)
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
parser.add_argument(
"--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
)
parser.add_argument(
"--rank",
type=int,
default=4,
help=("The dimension of the LoRA update matrices."),
)
parser.add_argument(
"--tracker_name",
type=str,
default="diffusion-dpo-lora",
help=("The name of the tracker to report results to."),
)
if input_args is not None:
args = parser.parse_args(input_args)
else:
args = parser.parse_args()
if args.dataset_name is None:
raise ValueError("Must provide a `dataset_name`.")
env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
if env_local_rank != -1 and env_local_rank != args.local_rank:
args.local_rank = env_local_rank
return args
def tokenize_captions(tokenizer, examples):
max_length = tokenizer.model_max_length
captions = []
for caption in examples["caption"]:
captions.append(caption)
text_inputs = tokenizer(
captions, truncation=True, padding="max_length", max_length=max_length, return_tensors="pt"
)
return text_inputs.input_ids
@torch.no_grad()
def encode_prompt(text_encoder, input_ids):
text_input_ids = input_ids.to(text_encoder.device)
attention_mask = None
prompt_embeds = text_encoder(text_input_ids, attention_mask=attention_mask)
prompt_embeds = prompt_embeds[0]
return prompt_embeds
def main(args):
if args.report_to == "wandb" and args.hub_token is not None:
raise ValueError(
"You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token."
" Please use `huggingface-cli login` to authenticate with the Hub."
)
logging_dir = Path(args.output_dir, args.logging_dir)
accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
accelerator = Accelerator(
gradient_accumulation_steps=args.gradient_accumulation_steps,
mixed_precision=args.mixed_precision,
log_with=args.report_to,
project_config=accelerator_project_config,
)
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state, main_process_only=False)
if accelerator.is_local_main_process:
transformers.utils.logging.set_verbosity_warning()
diffusers.utils.logging.set_verbosity_info()
else:
transformers.utils.logging.set_verbosity_error()
diffusers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Handle the repository creation
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
if args.push_to_hub:
repo_id = create_repo(
repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
).repo_id
# Load the tokenizer
tokenizer = AutoTokenizer.from_pretrained(
args.pretrained_model_name_or_path,
subfolder="tokenizer",
revision=args.revision,
use_fast=False,
)
# import correct text encoder class
text_encoder_cls = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision)
# Load scheduler and models
noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
text_encoder = text_encoder_cls.from_pretrained(
args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, variant=args.variant
)
vae = AutoencoderKL.from_pretrained(
args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision, variant=args.variant
)
unet = UNet2DConditionModel.from_pretrained(
args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, variant=args.variant
)
vae.requires_grad_(False)
text_encoder.requires_grad_(False)
unet.requires_grad_(False)
# For mixed precision training we cast all non-trainable weights (vae, non-lora text_encoder and non-lora unet) to half-precision
# as these weights are only used for inference, keeping weights in full precision is not required.
weight_dtype = torch.float32
if accelerator.mixed_precision == "fp16":
weight_dtype = torch.float16
elif accelerator.mixed_precision == "bf16":
weight_dtype = torch.bfloat16
# Move unet, vae and text_encoder to device and cast to weight_dtype
unet.to(accelerator.device, dtype=weight_dtype)
vae.to(accelerator.device, dtype=weight_dtype)
text_encoder.to(accelerator.device, dtype=weight_dtype)
# Set up LoRA.
unet_lora_config = LoraConfig(
r=args.rank,
lora_alpha=args.rank,
init_lora_weights="gaussian",
target_modules=["to_k", "to_q", "to_v", "to_out.0"],
)
# Add adapter and make sure the trainable params are in float32.
unet.add_adapter(unet_lora_config)
if args.mixed_precision == "fp16":
for param in unet.parameters():
# only upcast trainable parameters (LoRA) into fp32
if param.requires_grad:
param.data = param.to(torch.float32)
if args.enable_xformers_memory_efficient_attention:
if is_xformers_available():
import xformers
xformers_version = version.parse(xformers.__version__)
if xformers_version == version.parse("0.0.16"):
logger.warning(
"xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
)
unet.enable_xformers_memory_efficient_attention()
else:
raise ValueError("xformers is not available. Make sure it is installed correctly")
if args.gradient_checkpointing:
unet.enable_gradient_checkpointing()
# create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format
def save_model_hook(models, weights, output_dir):
if accelerator.is_main_process:
# There are only two options here: either just the unet attention processor (LoRA) layers,
# or both the unet and text encoder attention layers.
unet_lora_layers_to_save = None
for model in models:
if isinstance(model, type(accelerator.unwrap_model(unet))):
unet_lora_layers_to_save = convert_state_dict_to_diffusers(get_peft_model_state_dict(model))
else:
raise ValueError(f"unexpected save model: {model.__class__}")
# make sure to pop weight so that corresponding model is not saved again
weights.pop()
LoraLoaderMixin.save_lora_weights(
output_dir,
unet_lora_layers=unet_lora_layers_to_save,
text_encoder_lora_layers=None,
)
def load_model_hook(models, input_dir):
unet_ = None
while len(models) > 0:
model = models.pop()
if isinstance(model, type(accelerator.unwrap_model(unet))):
unet_ = model
else:
raise ValueError(f"unexpected save model: {model.__class__}")
lora_state_dict, network_alphas = LoraLoaderMixin.lora_state_dict(input_dir)
LoraLoaderMixin.load_lora_into_unet(lora_state_dict, network_alphas=network_alphas, unet=unet_)
accelerator.register_save_state_pre_hook(save_model_hook)
accelerator.register_load_state_pre_hook(load_model_hook)
# Enable TF32 for faster training on Ampere GPUs,
# cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
if args.allow_tf32:
torch.backends.cuda.matmul.allow_tf32 = True
if args.scale_lr:
args.learning_rate = (
args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
)
# Use 8-bit Adam for lower memory usage or to fine-tune the model on 16GB GPUs
if args.use_8bit_adam:
try:
import bitsandbytes as bnb
except ImportError:
raise ImportError(
"To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
)
optimizer_class = bnb.optim.AdamW8bit
else:
optimizer_class = torch.optim.AdamW
# Optimizer creation
params_to_optimize = list(filter(lambda p: p.requires_grad, unet.parameters()))
optimizer = optimizer_class(
params_to_optimize,
lr=args.learning_rate,
betas=(args.adam_beta1, args.adam_beta2),
weight_decay=args.adam_weight_decay,
eps=args.adam_epsilon,
)
# Dataset and DataLoaders creation:
train_dataset = load_dataset(
args.dataset_name,
cache_dir=args.cache_dir,
split=args.dataset_split_name,
)
train_transforms = transforms.Compose(
[
transforms.Resize(int(args.resolution), interpolation=transforms.InterpolationMode.BILINEAR),
transforms.RandomCrop(args.resolution) if args.random_crop else transforms.CenterCrop(args.resolution),
transforms.Lambda(lambda x: x) if args.no_hflip else transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess_train(examples):
all_pixel_values = []
for col_name in ["jpg_0", "jpg_1"]:
images = [Image.open(io.BytesIO(im_bytes)).convert("RGB") for im_bytes in examples[col_name]]
pixel_values = [train_transforms(image) for image in images]
all_pixel_values.append(pixel_values)
# Concatenate each pair along the channel dim: preferred (winner) image first, then the rejected one
im_tup_iterator = zip(*all_pixel_values)
combined_pixel_values = []
for im_tup, label_0 in zip(im_tup_iterator, examples["label_0"]):
if label_0 == 0:
im_tup = im_tup[::-1]
combined_im = torch.cat(im_tup, dim=0) # no batch dim
combined_pixel_values.append(combined_im)
examples["pixel_values"] = combined_pixel_values
examples["input_ids"] = tokenize_captions(tokenizer, examples)
return examples
with accelerator.main_process_first():
if args.max_train_samples is not None:
train_dataset = train_dataset.shuffle(seed=args.seed).select(range(args.max_train_samples))
# Set the training transforms
train_dataset = train_dataset.with_transform(preprocess_train)
def collate_fn(examples):
pixel_values = torch.stack([example["pixel_values"] for example in examples])
pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
final_dict = {"pixel_values": pixel_values}
final_dict["input_ids"] = torch.stack([example["input_ids"] for example in examples])
return final_dict
train_dataloader = torch.utils.data.DataLoader(
train_dataset,
batch_size=args.train_batch_size,
shuffle=True,
collate_fn=collate_fn,
num_workers=args.dataloader_num_workers,
)
# Scheduler and math around the number of training steps.
overrode_max_train_steps = False
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
overrode_max_train_steps = True
lr_scheduler = get_scheduler(
args.lr_scheduler,
optimizer=optimizer,
num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
num_training_steps=args.max_train_steps * accelerator.num_processes,
num_cycles=args.lr_num_cycles,
power=args.lr_power,
)
unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
unet, optimizer, train_dataloader, lr_scheduler
)
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if overrode_max_train_steps:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
# Afterwards we recalculate our number of training epochs
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
# We need to initialize the trackers we use, and also store our configuration.
# The trackers initializes automatically on the main process.
if accelerator.is_main_process:
accelerator.init_trackers(args.tracker_name, config=vars(args))
# Train!
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num batches each epoch = {len(train_dataloader)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
global_step = 0
first_epoch = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint != "latest":
path = os.path.basename(args.resume_from_checkpoint)
else:
# Get the most recent checkpoint
dirs = os.listdir(args.output_dir)
dirs = [d for d in dirs if d.startswith("checkpoint")]
dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
path = dirs[-1] if len(dirs) > 0 else None
if path is None:
accelerator.print(
f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
)
args.resume_from_checkpoint = None
initial_global_step = 0
else:
accelerator.print(f"Resuming from checkpoint {path}")
accelerator.load_state(os.path.join(args.output_dir, path))
global_step = int(path.split("-")[1])
initial_global_step = global_step
first_epoch = global_step // num_update_steps_per_epoch
else:
initial_global_step = 0
progress_bar = tqdm(
range(0, args.max_train_steps),
initial=initial_global_step,
desc="Steps",
# Only show the progress bar once on each machine.
disable=not accelerator.is_local_main_process,
)
unet.train()
for epoch in range(first_epoch, args.num_train_epochs):
for step, batch in enumerate(train_dataloader):
with accelerator.accumulate(unet):
# (batch_size, 2*channels, h, w) -> (2*batch_size, channels, h, w)
pixel_values = batch["pixel_values"].to(dtype=weight_dtype)
feed_pixel_values = torch.cat(pixel_values.chunk(2, dim=1))
latents = []
for i in range(0, feed_pixel_values.shape[0], args.vae_encode_batch_size):
latents.append(
vae.encode(feed_pixel_values[i : i + args.vae_encode_batch_size]).latent_dist.sample()
)
latents = torch.cat(latents, dim=0)
latents = latents * vae.config.scaling_factor
# Sample noise that we'll add to the latents
noise = torch.randn_like(latents).chunk(2)[0].repeat(2, 1, 1, 1)
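# Both images of a pair share the same noise (the first chunk repeated) and, below, the same
# timesteps, so any loss difference comes only from the preferred vs. rejected image.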
# Sample a random timestep for each image
bsz = latents.shape[0] // 2
timesteps = torch.randint(
0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device, dtype=torch.long
).repeat(2)
# Add noise to the model input according to the noise magnitude at each timestep
# (this is the forward diffusion process)
noisy_model_input = noise_scheduler.add_noise(latents, noise, timesteps)
# Get the text embedding for conditioning
encoder_hidden_states = encode_prompt(text_encoder, batch["input_ids"]).repeat(2, 1, 1)
# Predict the noise residual
model_pred = unet(
noisy_model_input,
timesteps,
encoder_hidden_states,
).sample
# Get the target for loss depending on the prediction type
if noise_scheduler.config.prediction_type == "epsilon":
target = noise
elif noise_scheduler.config.prediction_type == "v_prediction":
target = noise_scheduler.get_velocity(latents, noise, timesteps)
else:
raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
# Compute losses.
model_losses = F.mse_loss(model_pred.float(), target.float(), reduction="none")
model_losses = model_losses.mean(dim=list(range(1, len(model_losses.shape))))
model_losses_w, model_losses_l = model_losses.chunk(2)
# For logging
raw_model_loss = 0.5 * (model_losses_w.mean() + model_losses_l.mean())
model_diff = model_losses_w - model_losses_l # one value per preference pair (winner loss minus loser loss)
# Reference model predictions.
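# With the LoRA adapters disabled, the UNet falls back to its frozen pretrained weights and
# serves as the DPO reference model.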
accelerator.unwrap_model(unet).disable_adapters()
with torch.no_grad():
ref_preds = unet(
noisy_model_input,
timesteps,
encoder_hidden_states,
).sample.detach()
ref_loss = F.mse_loss(ref_preds.float(), target.float(), reduction="none")
ref_loss = ref_loss.mean(dim=list(range(1, len(ref_loss.shape))))
ref_losses_w, ref_losses_l = ref_loss.chunk(2)
ref_diff = ref_losses_w - ref_losses_l
raw_ref_loss = ref_loss.mean()
# Re-enable adapters.
accelerator.unwrap_model(unet).enable_adapters()
# Final loss.
logits = ref_diff - model_diff
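# logits > 0 when the LoRA model fits the preferred image better, relative to the rejected one,
# than the frozen reference does; the losses below push logits upwards.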
if args.loss_type == "sigmoid":
loss = -1 * F.logsigmoid(args.beta_dpo * logits).mean()
elif args.loss_type == "hinge":
loss = torch.relu(1 - args.beta_dpo * logits).mean()
elif args.loss_type == "ipo":
losses = (logits - 1 / (2 * args.beta_dpo)) ** 2
loss = losses.mean()
else:
raise ValueError(f"Unknown loss type {args.loss_type}")
implicit_acc = (logits > 0).sum().float() / logits.size(0)
implicit_acc += 0.5 * (logits == 0).sum().float() / logits.size(0)
accelerator.backward(loss)
if accelerator.sync_gradients:
accelerator.clip_grad_norm_(params_to_optimize, args.max_grad_norm)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# Checks if the accelerator has performed an optimization step behind the scenes
if accelerator.sync_gradients:
progress_bar.update(1)
global_step += 1
if accelerator.is_main_process:
if global_step % args.checkpointing_steps == 0:
# _before_ saving state, check if this save would set us over the `checkpoints_total_limit`
if args.checkpoints_total_limit is not None:
checkpoints = os.listdir(args.output_dir)
checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))
# before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints
if len(checkpoints) >= args.checkpoints_total_limit:
num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
removing_checkpoints = checkpoints[0:num_to_remove]
logger.info(
f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints"
)
logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")
for removing_checkpoint in removing_checkpoints:
removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
shutil.rmtree(removing_checkpoint)
save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
accelerator.save_state(save_path)
logger.info(f"Saved state to {save_path}")
if args.run_validation and global_step % args.validation_steps == 0:
log_validation(
args, unet=unet, accelerator=accelerator, weight_dtype=weight_dtype, epoch=epoch
)
logs = {
"loss": loss.detach().item(),
"raw_model_loss": raw_model_loss.detach().item(),
"ref_loss": raw_ref_loss.detach().item(),
"implicit_acc": implicit_acc.detach().item(),
"lr": lr_scheduler.get_last_lr()[0],
}
progress_bar.set_postfix(**logs)
accelerator.log(logs, step=global_step)
if global_step >= args.max_train_steps:
break
# Save the lora layers
accelerator.wait_for_everyone()
if accelerator.is_main_process:
unet = accelerator.unwrap_model(unet)
unet = unet.to(torch.float32)
unet_lora_state_dict = convert_state_dict_to_diffusers(get_peft_model_state_dict(unet))
LoraLoaderMixin.save_lora_weights(
save_directory=args.output_dir, unet_lora_layers=unet_lora_state_dict, text_encoder_lora_layers=None
)
# Final validation.
if args.run_validation:
log_validation(
args,
unet=None,
accelerator=accelerator,
weight_dtype=weight_dtype,
epoch=epoch,
is_final_validation=True,
)
if args.push_to_hub:
upload_folder(
repo_id=repo_id,
folder_path=args.output_dir,
commit_message="End of training",
ignore_patterns=["step_*", "epoch_*"],
)
accelerator.end_training()
if __name__ == "__main__":
args = parse_args()
main(args)
| diffusers/examples/research_projects/diffusion_dpo/train_diffusion_dpo.py/0 | {
"file_path": "diffusers/examples/research_projects/diffusion_dpo/train_diffusion_dpo.py",
"repo_id": "diffusers",
"token_count": 17438
} | 114 |
import argparse
import itertools
import math
import os
import random
from pathlib import Path
from typing import Iterable
import numpy as np
import PIL
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration, set_seed
from huggingface_hub import create_repo, upload_folder
from neural_compressor.utils import logger
from packaging import version
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
from diffusers.optimization import get_scheduler
from diffusers.utils import make_image_grid
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
PIL_INTERPOLATION = {
"linear": PIL.Image.Resampling.BILINEAR,
"bilinear": PIL.Image.Resampling.BILINEAR,
"bicubic": PIL.Image.Resampling.BICUBIC,
"lanczos": PIL.Image.Resampling.LANCZOS,
"nearest": PIL.Image.Resampling.NEAREST,
}
else:
PIL_INTERPOLATION = {
"linear": PIL.Image.LINEAR,
"bilinear": PIL.Image.BILINEAR,
"bicubic": PIL.Image.BICUBIC,
"lanczos": PIL.Image.LANCZOS,
"nearest": PIL.Image.NEAREST,
}
# ------------------------------------------------------------------------------
def save_progress(text_encoder, placeholder_token_id, accelerator, args, save_path):
logger.info("Saving embeddings")
learned_embeds = accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[placeholder_token_id]
learned_embeds_dict = {args.placeholder_token: learned_embeds.detach().cpu()}
torch.save(learned_embeds_dict, save_path)
def parse_args():
parser = argparse.ArgumentParser(description="Example of distillation for quantization on Textual Inversion.")
parser.add_argument(
"--save_steps",
type=int,
default=500,
help="Save learned_embeds.bin every X updates steps.",
)
parser.add_argument(
"--pretrained_model_name_or_path",
type=str,
default=None,
required=True,
help="Path to pretrained model or model identifier from huggingface.co/models.",
)
parser.add_argument(
"--revision",
type=str,
default=None,
required=False,
help="Revision of pretrained model identifier from huggingface.co/models.",
)
parser.add_argument(
"--tokenizer_name",
type=str,
default=None,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--train_data_dir", type=str, default=None, required=True, help="A folder containing the training data."
)
parser.add_argument(
"--placeholder_token",
type=str,
default=None,
required=True,
help="A token to use as a placeholder for the concept.",
)
parser.add_argument(
"--initializer_token", type=str, default=None, required=True, help="A token to use as initializer word."
)
parser.add_argument("--learnable_property", type=str, default="object", help="Choose between 'object' and 'style'")
parser.add_argument("--repeats", type=int, default=100, help="How many times to repeat the training data.")
parser.add_argument(
"--output_dir",
type=str,
default="text-inversion-model",
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument(
"--cache_dir",
type=str,
default=None,
help="The directory where the downloaded models and datasets will be stored.",
)
parser.add_argument("--seed", type=int, default=42, help="A seed for reproducible training.")
parser.add_argument(
"--resolution",
type=int,
default=512,
help=(
"The resolution for input images, all the images in the train/validation dataset will be resized to this"
" resolution"
),
)
parser.add_argument(
"--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution"
)
parser.add_argument(
"--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader."
)
parser.add_argument("--num_train_epochs", type=int, default=100)
parser.add_argument(
"--max_train_steps",
type=int,
default=5000,
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=1e-4,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument(
"--scale_lr",
action="store_true",
default=False,
help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
)
parser.add_argument(
"--lr_scheduler",
type=str,
default="constant",
help=(
'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
' "constant", "constant_with_warmup"]'
),
)
parser.add_argument(
"--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
)
parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
parser.add_argument(
"--hub_model_id",
type=str,
default=None,
help="The name of the repository to keep in sync with the local `output_dir`.",
)
parser.add_argument(
"--logging_dir",
type=str,
default="logs",
help=(
"[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
" *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
),
)
parser.add_argument(
"--mixed_precision",
type=str,
default="no",
choices=["no", "fp16", "bf16"],
help=(
"Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU."
),
)
parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument("--do_quantization", action="store_true", help="Whether or not to do quantization.")
parser.add_argument("--do_distillation", action="store_true", help="Whether or not to do distillation.")
parser.add_argument(
"--verify_loading", action="store_true", help="Whether or not to verify the loading of the quantized model."
)
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
args = parser.parse_args()
env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
if env_local_rank != -1 and env_local_rank != args.local_rank:
args.local_rank = env_local_rank
if args.train_data_dir is None:
raise ValueError("You must specify a train data directory.")
return args
imagenet_templates_small = [
"a photo of a {}",
"a rendering of a {}",
"a cropped photo of the {}",
"the photo of a {}",
"a photo of a clean {}",
"a photo of a dirty {}",
"a dark photo of the {}",
"a photo of my {}",
"a photo of the cool {}",
"a close-up photo of a {}",
"a bright photo of the {}",
"a cropped photo of a {}",
"a photo of the {}",
"a good photo of the {}",
"a photo of one {}",
"a close-up photo of the {}",
"a rendition of the {}",
"a photo of the clean {}",
"a rendition of a {}",
"a photo of a nice {}",
"a good photo of a {}",
"a photo of the nice {}",
"a photo of the small {}",
"a photo of the weird {}",
"a photo of the large {}",
"a photo of a cool {}",
"a photo of a small {}",
]
imagenet_style_templates_small = [
"a painting in the style of {}",
"a rendering in the style of {}",
"a cropped painting in the style of {}",
"the painting in the style of {}",
"a clean painting in the style of {}",
"a dirty painting in the style of {}",
"a dark painting in the style of {}",
"a picture in the style of {}",
"a cool painting in the style of {}",
"a close-up painting in the style of {}",
"a bright painting in the style of {}",
"a cropped painting in the style of {}",
"a good painting in the style of {}",
"a close-up painting in the style of {}",
"a rendition in the style of {}",
"a nice painting in the style of {}",
"a small painting in the style of {}",
"a weird painting in the style of {}",
"a large painting in the style of {}",
]
# Adapted from torch-ema https://github.com/fadel/pytorch_ema/blob/master/torch_ema/ema.py#L14
class EMAModel:
"""
    Exponential Moving Average of model weights
"""
def __init__(self, parameters: Iterable[torch.nn.Parameter], decay=0.9999):
parameters = list(parameters)
self.shadow_params = [p.clone().detach() for p in parameters]
self.decay = decay
self.optimization_step = 0
    def get_decay(self, optimization_step):
        """
        Compute the decay factor for the exponential moving average, warming up from
        (1 + step) / (10 + step) towards the configured maximum decay.
        """
        value = (1 + optimization_step) / (10 + optimization_step)
        return min(self.decay, value)
    @torch.no_grad()
    def step(self, parameters):
        parameters = list(parameters)
        self.optimization_step += 1
        # Do not overwrite self.decay with the warmed-up value; keeping the configured
        # maximum intact avoids corrupting the warmup schedule on subsequent steps.
        decay = self.get_decay(self.optimization_step)
        one_minus_decay = 1 - decay
        for s_param, param in zip(self.shadow_params, parameters):
            if param.requires_grad:
                # Standard EMA update: s_param <- decay * s_param + (1 - decay) * param
                s_param.sub_(one_minus_decay * (s_param - param))
            else:
                s_param.copy_(param)
torch.cuda.empty_cache()
def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
"""
Copy current averaged parameters into given collection of parameters.
Args:
            parameters: Iterable of `torch.nn.Parameter`; the parameters to be
                updated in place with the stored moving averages.
"""
parameters = list(parameters)
for s_param, param in zip(self.shadow_params, parameters):
param.data.copy_(s_param.data)
def to(self, device=None, dtype=None) -> None:
r"""Move internal buffers of the ExponentialMovingAverage to `device`.
Args:
device: like `device` argument to `torch.Tensor.to`
"""
# .to() on the tensors handles None correctly
self.shadow_params = [
p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
for p in self.shadow_params
]
class TextualInversionDataset(Dataset):
def __init__(
self,
data_root,
tokenizer,
learnable_property="object", # [object, style]
size=512,
repeats=100,
interpolation="bicubic",
flip_p=0.5,
set="train",
placeholder_token="*",
center_crop=False,
):
self.data_root = data_root
self.tokenizer = tokenizer
self.learnable_property = learnable_property
self.size = size
self.placeholder_token = placeholder_token
self.center_crop = center_crop
self.flip_p = flip_p
self.image_paths = [os.path.join(self.data_root, file_path) for file_path in os.listdir(self.data_root)]
self.num_images = len(self.image_paths)
self._length = self.num_images
if set == "train":
self._length = self.num_images * repeats
self.interpolation = {
"linear": PIL_INTERPOLATION["linear"],
"bilinear": PIL_INTERPOLATION["bilinear"],
"bicubic": PIL_INTERPOLATION["bicubic"],
"lanczos": PIL_INTERPOLATION["lanczos"],
}[interpolation]
self.templates = imagenet_style_templates_small if learnable_property == "style" else imagenet_templates_small
self.flip_transform = transforms.RandomHorizontalFlip(p=self.flip_p)
def __len__(self):
return self._length
def __getitem__(self, i):
example = {}
image = Image.open(self.image_paths[i % self.num_images])
if not image.mode == "RGB":
image = image.convert("RGB")
placeholder_string = self.placeholder_token
text = random.choice(self.templates).format(placeholder_string)
example["input_ids"] = self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=self.tokenizer.model_max_length,
return_tensors="pt",
).input_ids[0]
# default to score-sde preprocessing
img = np.array(image).astype(np.uint8)
if self.center_crop:
crop = min(img.shape[0], img.shape[1])
(
h,
w,
) = (
img.shape[0],
img.shape[1],
)
img = img[(h - crop) // 2 : (h + crop) // 2, (w - crop) // 2 : (w + crop) // 2]
image = Image.fromarray(img)
image = image.resize((self.size, self.size), resample=self.interpolation)
image = self.flip_transform(image)
image = np.array(image).astype(np.uint8)
image = (image / 127.5 - 1.0).astype(np.float32)
example["pixel_values"] = torch.from_numpy(image).permute(2, 0, 1)
return example
def freeze_params(params):
for param in params:
param.requires_grad = False
def generate_images(pipeline, prompt="", guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42):
generator = torch.Generator(pipeline.device).manual_seed(seed)
images = pipeline(
prompt,
guidance_scale=guidance_scale,
num_inference_steps=num_inference_steps,
generator=generator,
num_images_per_prompt=num_images_per_prompt,
).images
_rows = int(math.sqrt(num_images_per_prompt))
grid = make_image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
return grid
def main():
args = parse_args()
logging_dir = os.path.join(args.output_dir, args.logging_dir)
accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
accelerator = Accelerator(
gradient_accumulation_steps=args.gradient_accumulation_steps,
mixed_precision=args.mixed_precision,
log_with="tensorboard",
project_config=accelerator_project_config,
)
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Handle the repository creation
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
if args.push_to_hub:
repo_id = create_repo(
repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
).repo_id
    # Load the tokenizer and add the placeholder token as an additional special token
if args.tokenizer_name:
tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name)
elif args.pretrained_model_name_or_path:
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
# Load models and create wrapper for stable diffusion
noise_scheduler = DDPMScheduler.from_config(args.pretrained_model_name_or_path, subfolder="scheduler")
text_encoder = CLIPTextModel.from_pretrained(
args.pretrained_model_name_or_path,
subfolder="text_encoder",
revision=args.revision,
)
vae = AutoencoderKL.from_pretrained(
args.pretrained_model_name_or_path,
subfolder="vae",
revision=args.revision,
)
unet = UNet2DConditionModel.from_pretrained(
args.pretrained_model_name_or_path,
subfolder="unet",
revision=args.revision,
)
train_unet = False
# Freeze vae and unet
freeze_params(vae.parameters())
if not args.do_quantization and not args.do_distillation:
# Add the placeholder token in tokenizer
num_added_tokens = tokenizer.add_tokens(args.placeholder_token)
if num_added_tokens == 0:
raise ValueError(
f"The tokenizer already contains the token {args.placeholder_token}. Please pass a different"
" `placeholder_token` that is not already in the tokenizer."
)
# Convert the initializer_token, placeholder_token to ids
token_ids = tokenizer.encode(args.initializer_token, add_special_tokens=False)
# Check if initializer_token is a single token or a sequence of tokens
if len(token_ids) > 1:
raise ValueError("The initializer token must be a single token.")
initializer_token_id = token_ids[0]
placeholder_token_id = tokenizer.convert_tokens_to_ids(args.placeholder_token)
# Resize the token embeddings as we are adding new special tokens to the tokenizer
text_encoder.resize_token_embeddings(len(tokenizer))
# Initialise the newly added placeholder token with the embeddings of the initializer token
token_embeds = text_encoder.get_input_embeddings().weight.data
token_embeds[placeholder_token_id] = token_embeds[initializer_token_id]
freeze_params(unet.parameters())
# Freeze all parameters except for the token embeddings in text encoder
params_to_freeze = itertools.chain(
text_encoder.text_model.encoder.parameters(),
text_encoder.text_model.final_layer_norm.parameters(),
text_encoder.text_model.embeddings.position_embedding.parameters(),
)
freeze_params(params_to_freeze)
else:
train_unet = True
freeze_params(text_encoder.parameters())
if args.scale_lr:
args.learning_rate = (
args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
)
# Initialize the optimizer
optimizer = torch.optim.AdamW(
# only optimize the unet or embeddings of text_encoder
unet.parameters() if train_unet else text_encoder.get_input_embeddings().parameters(),
lr=args.learning_rate,
betas=(args.adam_beta1, args.adam_beta2),
weight_decay=args.adam_weight_decay,
eps=args.adam_epsilon,
)
train_dataset = TextualInversionDataset(
data_root=args.train_data_dir,
tokenizer=tokenizer,
size=args.resolution,
placeholder_token=args.placeholder_token,
repeats=args.repeats,
learnable_property=args.learnable_property,
center_crop=args.center_crop,
set="train",
)
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=args.train_batch_size, shuffle=True)
# Scheduler and math around the number of training steps.
overrode_max_train_steps = False
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
overrode_max_train_steps = True
lr_scheduler = get_scheduler(
args.lr_scheduler,
optimizer=optimizer,
num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
num_training_steps=args.max_train_steps * accelerator.num_processes,
)
if not train_unet:
text_encoder = accelerator.prepare(text_encoder)
unet.to(accelerator.device)
unet.eval()
else:
unet = accelerator.prepare(unet)
text_encoder.to(accelerator.device)
text_encoder.eval()
optimizer, train_dataloader, lr_scheduler = accelerator.prepare(optimizer, train_dataloader, lr_scheduler)
# Move vae to device
vae.to(accelerator.device)
# Keep vae in eval model as we don't train these
vae.eval()
compression_manager = None
def train_func(model):
if train_unet:
unet_ = model
text_encoder_ = text_encoder
else:
unet_ = unet
text_encoder_ = model
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if overrode_max_train_steps:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
# Afterwards we recalculate our number of training epochs
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
# We need to initialize the trackers we use, and also store our configuration.
# The trackers initializes automatically on the main process.
if accelerator.is_main_process:
accelerator.init_trackers("textual_inversion", config=vars(args))
# Train!
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
# Only show the progress bar once on each machine.
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
progress_bar.set_description("Steps")
global_step = 0
if train_unet and args.use_ema:
ema_unet = EMAModel(unet_.parameters())
for epoch in range(args.num_train_epochs):
model.train()
train_loss = 0.0
for step, batch in enumerate(train_dataloader):
with accelerator.accumulate(model):
# Convert images to latent space
latents = vae.encode(batch["pixel_values"]).latent_dist.sample().detach()
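                    # 0.18215 is the latent scaling factor of the Stable Diffusion v1 VAE
                    # (vae.config.scaling_factor), applied so the latents have roughly unit variance.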
latents = latents * 0.18215
# Sample noise that we'll add to the latents
noise = torch.randn(latents.shape).to(latents.device)
bsz = latents.shape[0]
# Sample a random timestep for each image
timesteps = torch.randint(
0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device
).long()
# Add noise to the latents according to the noise magnitude at each timestep
# (this is the forward diffusion process)
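                    # i.e. noisy_latents = sqrt(alpha_bar_t) * latents + sqrt(1 - alpha_bar_t) * noise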
noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
# Get the text embedding for conditioning
encoder_hidden_states = text_encoder_(batch["input_ids"])[0]
# Predict the noise residual
model_pred = unet_(noisy_latents, timesteps, encoder_hidden_states).sample
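                    # Epsilon-prediction objective: MSE between the predicted and the true noise
                    # (this script assumes the scheduler's prediction_type is "epsilon").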
loss = F.mse_loss(model_pred, noise, reduction="none").mean([1, 2, 3]).mean()
if train_unet and compression_manager:
unet_inputs = {
"sample": noisy_latents,
"timestep": timesteps,
"encoder_hidden_states": encoder_hidden_states,
}
loss = compression_manager.callbacks.on_after_compute_loss(unet_inputs, model_pred, loss)
# Gather the losses across all processes for logging (if we use distributed training).
avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean()
train_loss += avg_loss.item() / args.gradient_accumulation_steps
# Backpropagate
accelerator.backward(loss)
if train_unet:
if accelerator.sync_gradients:
accelerator.clip_grad_norm_(unet_.parameters(), args.max_grad_norm)
else:
# Zero out the gradients for all token embeddings except the newly added
# embeddings for the concept, as we only want to optimize the concept embeddings
if accelerator.num_processes > 1:
grads = text_encoder_.module.get_input_embeddings().weight.grad
else:
grads = text_encoder_.get_input_embeddings().weight.grad
# Get the index for tokens that we want to zero the grads for
index_grads_to_zero = torch.arange(len(tokenizer)) != placeholder_token_id
grads.data[index_grads_to_zero, :] = grads.data[index_grads_to_zero, :].fill_(0)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# Checks if the accelerator has performed an optimization step behind the scenes
if accelerator.sync_gradients:
if train_unet and args.use_ema:
ema_unet.step(unet_.parameters())
progress_bar.update(1)
global_step += 1
accelerator.log({"train_loss": train_loss}, step=global_step)
train_loss = 0.0
if not train_unet and global_step % args.save_steps == 0:
save_path = os.path.join(args.output_dir, f"learned_embeds-steps-{global_step}.bin")
save_progress(text_encoder_, placeholder_token_id, accelerator, args, save_path)
logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
progress_bar.set_postfix(**logs)
accelerator.log(logs, step=global_step)
if global_step >= args.max_train_steps:
break
accelerator.wait_for_everyone()
if train_unet and args.use_ema:
ema_unet.copy_to(unet_.parameters())
if not train_unet:
return text_encoder_
if not train_unet:
text_encoder = train_func(text_encoder)
else:
import copy
model = copy.deepcopy(unet)
confs = []
if args.do_quantization:
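            # Quantization-aware training inserts fake-quantization ops during training so the
            # UNet can be exported as an int8 model afterwards.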
from neural_compressor import QuantizationAwareTrainingConfig
q_conf = QuantizationAwareTrainingConfig()
confs.append(q_conf)
if args.do_distillation:
teacher_model = copy.deepcopy(model)
def attention_fetcher(x):
return x.sample
layer_mappings = [
[
[
"conv_in",
]
],
[
[
"time_embedding",
]
],
[["down_blocks.0.attentions.0", attention_fetcher]],
[["down_blocks.0.attentions.1", attention_fetcher]],
[
[
"down_blocks.0.resnets.0",
]
],
[
[
"down_blocks.0.resnets.1",
]
],
[
[
"down_blocks.0.downsamplers.0",
]
],
[["down_blocks.1.attentions.0", attention_fetcher]],
[["down_blocks.1.attentions.1", attention_fetcher]],
[
[
"down_blocks.1.resnets.0",
]
],
[
[
"down_blocks.1.resnets.1",
]
],
[
[
"down_blocks.1.downsamplers.0",
]
],
[["down_blocks.2.attentions.0", attention_fetcher]],
[["down_blocks.2.attentions.1", attention_fetcher]],
[
[
"down_blocks.2.resnets.0",
]
],
[
[
"down_blocks.2.resnets.1",
]
],
[
[
"down_blocks.2.downsamplers.0",
]
],
[
[
"down_blocks.3.resnets.0",
]
],
[
[
"down_blocks.3.resnets.1",
]
],
[
[
"up_blocks.0.resnets.0",
]
],
[
[
"up_blocks.0.resnets.1",
]
],
[
[
"up_blocks.0.resnets.2",
]
],
[
[
"up_blocks.0.upsamplers.0",
]
],
[["up_blocks.1.attentions.0", attention_fetcher]],
[["up_blocks.1.attentions.1", attention_fetcher]],
[["up_blocks.1.attentions.2", attention_fetcher]],
[
[
"up_blocks.1.resnets.0",
]
],
[
[
"up_blocks.1.resnets.1",
]
],
[
[
"up_blocks.1.resnets.2",
]
],
[
[
"up_blocks.1.upsamplers.0",
]
],
[["up_blocks.2.attentions.0", attention_fetcher]],
[["up_blocks.2.attentions.1", attention_fetcher]],
[["up_blocks.2.attentions.2", attention_fetcher]],
[
[
"up_blocks.2.resnets.0",
]
],
[
[
"up_blocks.2.resnets.1",
]
],
[
[
"up_blocks.2.resnets.2",
]
],
[
[
"up_blocks.2.upsamplers.0",
]
],
[["up_blocks.3.attentions.0", attention_fetcher]],
[["up_blocks.3.attentions.1", attention_fetcher]],
[["up_blocks.3.attentions.2", attention_fetcher]],
[
[
"up_blocks.3.resnets.0",
]
],
[
[
"up_blocks.3.resnets.1",
]
],
[
[
"up_blocks.3.resnets.2",
]
],
[["mid_block.attentions.0", attention_fetcher]],
[
[
"mid_block.resnets.0",
]
],
[
[
"mid_block.resnets.1",
]
],
[
[
"conv_out",
]
],
]
layer_names = [layer_mapping[0][0] for layer_mapping in layer_mappings]
if not set(layer_names).issubset([n[0] for n in model.named_modules()]):
raise ValueError(
"Provided model is not compatible with the default layer_mappings, "
'please use the model fine-tuned from "CompVis/stable-diffusion-v1-4", '
"or modify the layer_mappings variable to fit your model."
f"\nDefault layer_mappings are as such:\n{layer_mappings}"
)
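            # Intermediate-layer knowledge distillation: each listed student layer output is
            # matched to the frozen teacher's with an MSE loss, added on top of the task loss
            # (add_origin_loss=True below).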
from neural_compressor.config import DistillationConfig, IntermediateLayersKnowledgeDistillationLossConfig
distillation_criterion = IntermediateLayersKnowledgeDistillationLossConfig(
layer_mappings=layer_mappings,
loss_types=["MSE"] * len(layer_mappings),
loss_weights=[1.0 / len(layer_mappings)] * len(layer_mappings),
add_origin_loss=True,
)
d_conf = DistillationConfig(teacher_model=teacher_model, criterion=distillation_criterion)
confs.append(d_conf)
from neural_compressor.training import prepare_compression
compression_manager = prepare_compression(model, confs)
compression_manager.callbacks.on_train_begin()
model = compression_manager.model
train_func(model)
compression_manager.callbacks.on_train_end()
# Save the resulting model and its corresponding configuration in the given directory
model.save(args.output_dir)
logger.info(f"Optimized model saved to: {args.output_dir}.")
# change to framework model for further use
model = model.model
# Create the pipeline using using the trained modules and save it.
templates = imagenet_style_templates_small if args.learnable_property == "style" else imagenet_templates_small
prompt = templates[0].format(args.placeholder_token)
if accelerator.is_main_process:
pipeline = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path,
text_encoder=accelerator.unwrap_model(text_encoder),
vae=vae,
unet=accelerator.unwrap_model(unet),
tokenizer=tokenizer,
)
pipeline.save_pretrained(args.output_dir)
pipeline = pipeline.to(unet.device)
baseline_model_images = generate_images(pipeline, prompt=prompt, seed=args.seed)
baseline_model_images.save(
os.path.join(args.output_dir, "{}_baseline_model.png".format("_".join(prompt.split())))
)
if not train_unet:
# Also save the newly trained embeddings
save_path = os.path.join(args.output_dir, "learned_embeds.bin")
save_progress(text_encoder, placeholder_token_id, accelerator, args, save_path)
else:
setattr(pipeline, "unet", accelerator.unwrap_model(model))
if args.do_quantization:
pipeline = pipeline.to(torch.device("cpu"))
optimized_model_images = generate_images(pipeline, prompt=prompt, seed=args.seed)
optimized_model_images.save(
os.path.join(args.output_dir, "{}_optimized_model.png".format("_".join(prompt.split())))
)
if args.push_to_hub:
upload_folder(
repo_id=repo_id,
folder_path=args.output_dir,
commit_message="End of training",
ignore_patterns=["step_*", "epoch_*"],
)
accelerator.end_training()
if args.do_quantization and args.verify_loading:
# Load the model obtained after Intel Neural Compressor quantization
from neural_compressor.utils.pytorch import load
loaded_model = load(args.output_dir, model=unet)
loaded_model.eval()
setattr(pipeline, "unet", loaded_model)
if args.do_quantization:
pipeline = pipeline.to(torch.device("cpu"))
loaded_model_images = generate_images(pipeline, prompt=prompt, seed=args.seed)
if loaded_model_images != optimized_model_images:
logger.info("The quantized model was not successfully loaded.")
else:
logger.info("The quantized model was successfully loaded.")
if __name__ == "__main__":
main()
| diffusers/examples/research_projects/intel_opts/textual_inversion_dfq/textual_inversion.py/0 | {
"file_path": "diffusers/examples/research_projects/intel_opts/textual_inversion_dfq/textual_inversion.py",
"repo_id": "diffusers",
"token_count": 18301
} | 115 |
## Diffusers examples with ONNXRuntime optimizations
**This research project is not actively maintained by the diffusers team. For any questions or comments, please contact Prathik Rao (prathikr), Sunghoon Choi (hanbitmyths), Ashwini Khade (askhade), or Peng Wang (pengwa) on GitHub.**
This project aims to provide diffusers examples with ONNXRuntime optimizations for training/fine-tuning unconditional image generation, text-to-image, and textual inversion. Please see the individual directories for details on how to run each task with ONNXRuntime.
| diffusers/examples/research_projects/onnxruntime/README.md/0 | {
"file_path": "diffusers/examples/research_projects/onnxruntime/README.md",
"repo_id": "diffusers",
"token_count": 134
} | 116 |
import os
from typing import List
import faiss
import numpy as np
import torch
from datasets import Dataset, load_dataset
from PIL import Image
from transformers import CLIPFeatureExtractor, CLIPModel, PretrainedConfig
from diffusers import logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
def normalize_images(images: List[Image.Image]):
images = [np.array(image) for image in images]
images = [image / 127.5 - 1 for image in images]
return images
def preprocess_images(images: List[np.ndarray], feature_extractor: CLIPFeatureExtractor) -> torch.FloatTensor:
    """
    Preprocesses a list of normalized images into a batch of tensors.
    Args:
        images (:obj:`List[np.ndarray]`):
            A list of images (numpy arrays scaled to [-1, 1]) to preprocess.
    Returns:
        :obj:`torch.FloatTensor`: A batch of pixel-value tensors ready for the CLIP model.
    """
images = [np.array(image) for image in images]
images = [(image + 1.0) / 2.0 for image in images]
images = feature_extractor(images, return_tensors="pt").pixel_values
return images
class IndexConfig(PretrainedConfig):
def __init__(
self,
clip_name_or_path="openai/clip-vit-large-patch14",
dataset_name="Isamu136/oxford_pets_with_l14_emb",
image_column="image",
index_name="embeddings",
index_path=None,
dataset_set="train",
metric_type=faiss.METRIC_L2,
faiss_device=-1,
**kwargs,
):
super().__init__(**kwargs)
self.clip_name_or_path = clip_name_or_path
self.dataset_name = dataset_name
self.image_column = image_column
self.index_name = index_name
self.index_path = index_path
self.dataset_set = dataset_set
self.metric_type = metric_type
self.faiss_device = faiss_device
class Index:
"""
    Each index for a retrieval model is specific to the CLIP model and the dataset used to build it.
"""
def __init__(self, config: IndexConfig, dataset: Dataset):
self.config = config
self.dataset = dataset
self.index_initialized = False
self.index_name = config.index_name
self.index_path = config.index_path
self.init_index()
def set_index_name(self, index_name: str):
self.index_name = index_name
def init_index(self):
if not self.index_initialized:
if self.index_path and self.index_name:
try:
self.dataset.add_faiss_index(
column=self.index_name, metric_type=self.config.metric_type, device=self.config.faiss_device
)
self.index_initialized = True
except Exception as e:
print(e)
logger.info("Index not initialized")
if self.index_name in self.dataset.features:
self.dataset.add_faiss_index(column=self.index_name)
self.index_initialized = True
def build_index(
self,
model=None,
feature_extractor: CLIPFeatureExtractor = None,
torch_dtype=torch.float32,
):
if not self.index_initialized:
model = model or CLIPModel.from_pretrained(self.config.clip_name_or_path).to(dtype=torch_dtype)
feature_extractor = feature_extractor or CLIPFeatureExtractor.from_pretrained(
self.config.clip_name_or_path
)
self.dataset = get_dataset_with_emb_from_clip_model(
self.dataset,
model,
feature_extractor,
image_column=self.config.image_column,
index_name=self.config.index_name,
)
self.init_index()
def retrieve_imgs(self, vec, k: int = 20):
vec = np.array(vec).astype(np.float32)
return self.dataset.get_nearest_examples(self.index_name, vec, k=k)
def retrieve_imgs_batch(self, vec, k: int = 20):
vec = np.array(vec).astype(np.float32)
return self.dataset.get_nearest_examples_batch(self.index_name, vec, k=k)
def retrieve_indices(self, vec, k: int = 20):
vec = np.array(vec).astype(np.float32)
return self.dataset.search(self.index_name, vec, k=k)
def retrieve_indices_batch(self, vec, k: int = 20):
vec = np.array(vec).astype(np.float32)
return self.dataset.search_batch(self.index_name, vec, k=k)
class Retriever:
def __init__(
self,
config: IndexConfig,
index: Index = None,
dataset: Dataset = None,
model=None,
feature_extractor: CLIPFeatureExtractor = None,
):
self.config = config
self.index = index or self._build_index(config, dataset, model=model, feature_extractor=feature_extractor)
@classmethod
def from_pretrained(
cls,
retriever_name_or_path: str,
index: Index = None,
dataset: Dataset = None,
model=None,
feature_extractor: CLIPFeatureExtractor = None,
**kwargs,
):
config = kwargs.pop("config", None) or IndexConfig.from_pretrained(retriever_name_or_path, **kwargs)
return cls(config, index=index, dataset=dataset, model=model, feature_extractor=feature_extractor)
@staticmethod
def _build_index(
config: IndexConfig, dataset: Dataset = None, model=None, feature_extractor: CLIPFeatureExtractor = None
):
dataset = dataset or load_dataset(config.dataset_name)
dataset = dataset[config.dataset_set]
index = Index(config, dataset)
index.build_index(model=model, feature_extractor=feature_extractor)
return index
def save_pretrained(self, save_directory):
os.makedirs(save_directory, exist_ok=True)
if self.config.index_path is None:
index_path = os.path.join(save_directory, "hf_dataset_index.faiss")
self.index.dataset.get_index(self.config.index_name).save(index_path)
self.config.index_path = index_path
self.config.save_pretrained(save_directory)
def init_retrieval(self):
logger.info("initializing retrieval")
self.index.init_index()
def retrieve_imgs(self, embeddings: np.ndarray, k: int):
return self.index.retrieve_imgs(embeddings, k)
def retrieve_imgs_batch(self, embeddings: np.ndarray, k: int):
return self.index.retrieve_imgs_batch(embeddings, k)
def retrieve_indices(self, embeddings: np.ndarray, k: int):
return self.index.retrieve_indices(embeddings, k)
def retrieve_indices_batch(self, embeddings: np.ndarray, k: int):
return self.index.retrieve_indices_batch(embeddings, k)
def __call__(
self,
embeddings,
k: int = 20,
):
return self.index.retrieve_imgs(embeddings, k)
def map_txt_to_clip_feature(clip_model, tokenizer, prompt):
text_inputs = tokenizer(
prompt,
padding="max_length",
max_length=tokenizer.model_max_length,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
if text_input_ids.shape[-1] > tokenizer.model_max_length:
removed_text = tokenizer.batch_decode(text_input_ids[:, tokenizer.model_max_length :])
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {tokenizer.model_max_length} tokens: {removed_text}"
)
text_input_ids = text_input_ids[:, : tokenizer.model_max_length]
text_embeddings = clip_model.get_text_features(text_input_ids.to(clip_model.device))
text_embeddings = text_embeddings / torch.linalg.norm(text_embeddings, dim=-1, keepdim=True)
text_embeddings = text_embeddings[:, None, :]
return text_embeddings[0][0].cpu().detach().numpy()
def map_img_to_model_feature(model, feature_extractor, imgs, device):
for i, image in enumerate(imgs):
if not image.mode == "RGB":
imgs[i] = image.convert("RGB")
imgs = normalize_images(imgs)
retrieved_images = preprocess_images(imgs, feature_extractor).to(device)
image_embeddings = model(retrieved_images)
image_embeddings = image_embeddings / torch.linalg.norm(image_embeddings, dim=-1, keepdim=True)
image_embeddings = image_embeddings[None, ...]
return image_embeddings.cpu().detach().numpy()[0][0]
def get_dataset_with_emb_from_model(dataset, model, feature_extractor, image_column="image", index_name="embeddings"):
return dataset.map(
lambda example: {
index_name: map_img_to_model_feature(model, feature_extractor, [example[image_column]], model.device)
}
)
def get_dataset_with_emb_from_clip_model(
dataset, clip_model, feature_extractor, image_column="image", index_name="embeddings"
):
return dataset.map(
lambda example: {
index_name: map_img_to_model_feature(
clip_model.get_image_features, feature_extractor, [example[image_column]], clip_model.device
)
}
)
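# Example usage (a minimal sketch, not part of the original module; it assumes the default
# IndexConfig dataset and CLIP checkpoint are reachable and that faiss is installed):
#
#     from transformers import CLIPModel, CLIPTokenizer
#
#     config = IndexConfig()
#     retriever = Retriever(config)  # builds / loads the faiss index over the dataset
#     clip_model = CLIPModel.from_pretrained(config.clip_name_or_path)
#     tokenizer = CLIPTokenizer.from_pretrained(config.clip_name_or_path)
#     query = map_txt_to_clip_feature(clip_model, tokenizer, "a photo of a cat")
#     scores, retrieved = retriever.retrieve_imgs(query, k=5)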
| diffusers/examples/research_projects/rdm/retriever.py/0 | {
"file_path": "diffusers/examples/research_projects/rdm/retriever.py",
"repo_id": "diffusers",
"token_count": 3959
} | 117 |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import math
import os
import random
import shutil
import warnings
from pathlib import Path
import numpy as np
import PIL
import safetensors
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
from huggingface_hub import create_repo, upload_folder
# TODO: remove and import from diffusers.utils when the new version of diffusers is released
from packaging import version
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
DDPMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
StableDiffusionPipeline,
UNet2DConditionModel,
)
from diffusers.optimization import get_scheduler
from diffusers.utils import check_min_version, is_wandb_available
from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card
from diffusers.utils.import_utils import is_xformers_available
if is_wandb_available():
import wandb
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
PIL_INTERPOLATION = {
"linear": PIL.Image.Resampling.BILINEAR,
"bilinear": PIL.Image.Resampling.BILINEAR,
"bicubic": PIL.Image.Resampling.BICUBIC,
"lanczos": PIL.Image.Resampling.LANCZOS,
"nearest": PIL.Image.Resampling.NEAREST,
}
else:
PIL_INTERPOLATION = {
"linear": PIL.Image.LINEAR,
"bilinear": PIL.Image.BILINEAR,
"bicubic": PIL.Image.BICUBIC,
"lanczos": PIL.Image.LANCZOS,
"nearest": PIL.Image.NEAREST,
}
# ------------------------------------------------------------------------------
# Will error if the minimal version of diffusers is not installed. Remove at your own risk.
check_min_version("0.28.0.dev0")
logger = get_logger(__name__)
def save_model_card(repo_id: str, images: list = None, base_model: str = None, repo_folder: str = None):
img_str = ""
if images is not None:
for i, image in enumerate(images):
image.save(os.path.join(repo_folder, f"image_{i}.png"))
img_str += f"\n"
model_description = f"""
# Textual inversion text2image fine-tuning - {repo_id}
These are textual inversion adaptation weights for {base_model}. You can find some example images below. \n
{img_str}
"""
model_card = load_or_create_model_card(
repo_id_or_path=repo_id,
from_training=True,
license="creativeml-openrail-m",
base_model=base_model,
model_description=model_description,
inference=True,
)
tags = [
"stable-diffusion",
"stable-diffusion-diffusers",
"text-to-image",
"diffusers",
"textual_inversion",
"diffusers-training",
]
model_card = populate_model_card(model_card, tags=tags)
model_card.save(os.path.join(repo_folder, "README.md"))
def log_validation(text_encoder, tokenizer, unet, vae, args, accelerator, weight_dtype, epoch):
logger.info(
f"Running validation... \n Generating {args.num_validation_images} images with prompt:"
f" {args.validation_prompt}."
)
# create pipeline (note: unet and vae are loaded again in float32)
pipeline = DiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path,
text_encoder=accelerator.unwrap_model(text_encoder),
tokenizer=tokenizer,
unet=unet,
vae=vae,
safety_checker=None,
revision=args.revision,
variant=args.variant,
torch_dtype=weight_dtype,
)
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
pipeline = pipeline.to(accelerator.device)
pipeline.set_progress_bar_config(disable=True)
# run inference
generator = None if args.seed is None else torch.Generator(device=accelerator.device).manual_seed(args.seed)
images = []
for _ in range(args.num_validation_images):
with torch.autocast("cuda"):
image = pipeline(args.validation_prompt, num_inference_steps=25, generator=generator).images[0]
images.append(image)
for tracker in accelerator.trackers:
if tracker.name == "tensorboard":
np_images = np.stack([np.asarray(img) for img in images])
tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC")
if tracker.name == "wandb":
tracker.log(
{
"validation": [
wandb.Image(image, caption=f"{i}: {args.validation_prompt}") for i, image in enumerate(images)
]
}
)
del pipeline
torch.cuda.empty_cache()
return images
def save_progress(text_encoder, placeholder_token_ids, accelerator, args, save_path, safe_serialization=True):
logger.info("Saving embeddings")
learned_embeds = (
accelerator.unwrap_model(text_encoder)
.get_input_embeddings()
.weight[min(placeholder_token_ids) : max(placeholder_token_ids) + 1]
)
learned_embeds_dict = {args.placeholder_token: learned_embeds.detach().cpu()}
if safe_serialization:
safetensors.torch.save_file(learned_embeds_dict, save_path, metadata={"format": "pt"})
else:
torch.save(learned_embeds_dict, save_path)
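# Note: embeddings saved here can later be loaded into a Stable Diffusion pipeline for
# inference, e.g. (a sketch): pipe.load_textual_inversion("output_dir/learned_embeds.safetensors")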
def parse_args():
parser = argparse.ArgumentParser(description="Simple example of a training script.")
parser.add_argument(
"--save_steps",
type=int,
default=500,
help="Save learned_embeds.bin every X updates steps.",
)
parser.add_argument(
"--save_as_full_pipeline",
action="store_true",
help="Save the complete stable diffusion pipeline.",
)
parser.add_argument(
"--num_vectors",
type=int,
default=1,
help="How many textual inversion vectors shall be used to learn the concept.",
)
parser.add_argument(
"--pretrained_model_name_or_path",
type=str,
default=None,
required=True,
help="Path to pretrained model or model identifier from huggingface.co/models.",
)
parser.add_argument(
"--revision",
type=str,
default=None,
required=False,
help="Revision of pretrained model identifier from huggingface.co/models.",
)
parser.add_argument(
"--variant",
type=str,
default=None,
help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16",
)
parser.add_argument(
"--tokenizer_name",
type=str,
default=None,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--train_data_dir", type=str, default=None, required=True, help="A folder containing the training data."
)
parser.add_argument(
"--placeholder_token",
type=str,
default=None,
required=True,
help="A token to use as a placeholder for the concept.",
)
parser.add_argument(
"--initializer_token", type=str, default=None, required=True, help="A token to use as initializer word."
)
parser.add_argument("--learnable_property", type=str, default="object", help="Choose between 'object' and 'style'")
parser.add_argument("--repeats", type=int, default=100, help="How many times to repeat the training data.")
parser.add_argument(
"--output_dir",
type=str,
default="text-inversion-model",
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
parser.add_argument(
"--resolution",
type=int,
default=512,
help=(
"The resolution for input images, all the images in the train/validation dataset will be resized to this"
" resolution"
),
)
parser.add_argument(
"--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution."
)
parser.add_argument(
"--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader."
)
parser.add_argument("--num_train_epochs", type=int, default=100)
parser.add_argument(
"--max_train_steps",
type=int,
default=5000,
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--gradient_checkpointing",
action="store_true",
help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=1e-4,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument(
"--scale_lr",
action="store_true",
default=False,
help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
)
parser.add_argument(
"--lr_scheduler",
type=str,
default="constant",
help=(
'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
' "constant", "constant_with_warmup"]'
),
)
parser.add_argument(
"--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
)
parser.add_argument(
"--lr_num_cycles",
type=int,
default=1,
help="Number of hard resets of the lr in cosine_with_restarts scheduler.",
)
parser.add_argument(
"--dataloader_num_workers",
type=int,
default=0,
help=(
"Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
),
)
parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
parser.add_argument(
"--hub_model_id",
type=str,
default=None,
help="The name of the repository to keep in sync with the local `output_dir`.",
)
parser.add_argument(
"--logging_dir",
type=str,
default="logs",
help=(
"[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
" *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
),
)
parser.add_argument(
"--mixed_precision",
type=str,
default="no",
choices=["no", "fp16", "bf16"],
help=(
"Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and Nvidia Ampere GPU or Intel Gen 4 Xeon (and later) ."
),
)
parser.add_argument(
"--allow_tf32",
action="store_true",
help=(
"Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
" https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
),
)
parser.add_argument(
"--report_to",
type=str,
default="tensorboard",
help=(
'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
),
)
parser.add_argument(
"--validation_prompt",
type=str,
default=None,
help="A prompt that is used during validation to verify that the model is learning.",
)
parser.add_argument(
"--num_validation_images",
type=int,
default=4,
help="Number of images that should be generated during validation with `validation_prompt`.",
)
parser.add_argument(
"--validation_steps",
type=int,
default=100,
help=(
"Run validation every X steps. Validation consists of running the prompt"
" `args.validation_prompt` multiple times: `args.num_validation_images`"
" and logging the images."
),
)
parser.add_argument(
"--validation_epochs",
type=int,
default=None,
help=(
"Deprecated in favor of validation_steps. Run validation every X epochs. Validation consists of running the prompt"
" `args.validation_prompt` multiple times: `args.num_validation_images`"
" and logging the images."
),
)
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
parser.add_argument(
"--checkpointing_steps",
type=int,
default=500,
help=(
"Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming"
" training using `--resume_from_checkpoint`."
),
)
parser.add_argument(
"--checkpoints_total_limit",
type=int,
default=None,
help=("Max number of checkpoints to store."),
)
parser.add_argument(
"--resume_from_checkpoint",
type=str,
default=None,
help=(
"Whether training should be resumed from a previous checkpoint. Use a path saved by"
' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
),
)
parser.add_argument(
"--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
)
parser.add_argument(
"--no_safe_serialization",
action="store_true",
help="If specified save the checkpoint not in `safetensors` format, but in original PyTorch format instead.",
)
args = parser.parse_args()
env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
if env_local_rank != -1 and env_local_rank != args.local_rank:
args.local_rank = env_local_rank
if args.train_data_dir is None:
raise ValueError("You must specify a train data directory.")
return args
imagenet_templates_small = [
"a photo of a {}",
"a rendering of a {}",
"a cropped photo of the {}",
"the photo of a {}",
"a photo of a clean {}",
"a photo of a dirty {}",
"a dark photo of the {}",
"a photo of my {}",
"a photo of the cool {}",
"a close-up photo of a {}",
"a bright photo of the {}",
"a cropped photo of a {}",
"a photo of the {}",
"a good photo of the {}",
"a photo of one {}",
"a close-up photo of the {}",
"a rendition of the {}",
"a photo of the clean {}",
"a rendition of a {}",
"a photo of a nice {}",
"a good photo of a {}",
"a photo of the nice {}",
"a photo of the small {}",
"a photo of the weird {}",
"a photo of the large {}",
"a photo of a cool {}",
"a photo of a small {}",
]
imagenet_style_templates_small = [
"a painting in the style of {}",
"a rendering in the style of {}",
"a cropped painting in the style of {}",
"the painting in the style of {}",
"a clean painting in the style of {}",
"a dirty painting in the style of {}",
"a dark painting in the style of {}",
"a picture in the style of {}",
"a cool painting in the style of {}",
"a close-up painting in the style of {}",
"a bright painting in the style of {}",
"a cropped painting in the style of {}",
"a good painting in the style of {}",
"a close-up painting in the style of {}",
"a rendition in the style of {}",
"a nice painting in the style of {}",
"a small painting in the style of {}",
"a weird painting in the style of {}",
"a large painting in the style of {}",
]
class TextualInversionDataset(Dataset):
def __init__(
self,
data_root,
tokenizer,
learnable_property="object", # [object, style]
size=512,
repeats=100,
interpolation="bicubic",
flip_p=0.5,
set="train",
placeholder_token="*",
center_crop=False,
):
self.data_root = data_root
self.tokenizer = tokenizer
self.learnable_property = learnable_property
self.size = size
self.placeholder_token = placeholder_token
self.center_crop = center_crop
self.flip_p = flip_p
self.image_paths = [os.path.join(self.data_root, file_path) for file_path in os.listdir(self.data_root)]
self.num_images = len(self.image_paths)
self._length = self.num_images
if set == "train":
self._length = self.num_images * repeats
self.interpolation = {
"linear": PIL_INTERPOLATION["linear"],
"bilinear": PIL_INTERPOLATION["bilinear"],
"bicubic": PIL_INTERPOLATION["bicubic"],
"lanczos": PIL_INTERPOLATION["lanczos"],
}[interpolation]
self.templates = imagenet_style_templates_small if learnable_property == "style" else imagenet_templates_small
self.flip_transform = transforms.RandomHorizontalFlip(p=self.flip_p)
def __len__(self):
return self._length
def __getitem__(self, i):
example = {}
image = Image.open(self.image_paths[i % self.num_images])
if not image.mode == "RGB":
image = image.convert("RGB")
placeholder_string = self.placeholder_token
text = random.choice(self.templates).format(placeholder_string)
example["input_ids"] = self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=self.tokenizer.model_max_length,
return_tensors="pt",
).input_ids[0]
# default to score-sde preprocessing
img = np.array(image).astype(np.uint8)
if self.center_crop:
crop = min(img.shape[0], img.shape[1])
(
h,
w,
) = (
img.shape[0],
img.shape[1],
)
img = img[(h - crop) // 2 : (h + crop) // 2, (w - crop) // 2 : (w + crop) // 2]
image = Image.fromarray(img)
image = image.resize((self.size, self.size), resample=self.interpolation)
image = self.flip_transform(image)
image = np.array(image).astype(np.uint8)
image = (image / 127.5 - 1.0).astype(np.float32)
example["pixel_values"] = torch.from_numpy(image).permute(2, 0, 1)
return example
def main():
args = parse_args()
if args.report_to == "wandb" and args.hub_token is not None:
raise ValueError(
"You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token."
" Please use `huggingface-cli login` to authenticate with the Hub."
)
logging_dir = os.path.join(args.output_dir, args.logging_dir)
accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
accelerator = Accelerator(
gradient_accumulation_steps=args.gradient_accumulation_steps,
mixed_precision=args.mixed_precision,
log_with=args.report_to,
project_config=accelerator_project_config,
)
if args.report_to == "wandb":
if not is_wandb_available():
raise ImportError("Make sure to install wandb if you want to use it for logging during training.")
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state, main_process_only=False)
if accelerator.is_local_main_process:
transformers.utils.logging.set_verbosity_warning()
diffusers.utils.logging.set_verbosity_info()
else:
transformers.utils.logging.set_verbosity_error()
diffusers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Handle the repository creation
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
if args.push_to_hub:
repo_id = create_repo(
repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
).repo_id
# Load tokenizer
if args.tokenizer_name:
tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name)
elif args.pretrained_model_name_or_path:
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
# Load scheduler and models
noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
text_encoder = CLIPTextModel.from_pretrained(
args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
)
vae = AutoencoderKL.from_pretrained(
args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision, variant=args.variant
)
unet = UNet2DConditionModel.from_pretrained(
args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, variant=args.variant
)
# Add the placeholder token in tokenizer
placeholder_tokens = [args.placeholder_token]
if args.num_vectors < 1:
raise ValueError(f"--num_vectors has to be larger or equal to 1, but is {args.num_vectors}")
# add dummy tokens for multi-vector
additional_tokens = []
for i in range(1, args.num_vectors):
additional_tokens.append(f"{args.placeholder_token}_{i}")
placeholder_tokens += additional_tokens
num_added_tokens = tokenizer.add_tokens(placeholder_tokens)
if num_added_tokens != args.num_vectors:
raise ValueError(
f"The tokenizer already contains the token {args.placeholder_token}. Please pass a different"
" `placeholder_token` that is not already in the tokenizer."
)
# Convert the initializer_token, placeholder_token to ids
token_ids = tokenizer.encode(args.initializer_token, add_special_tokens=False)
# Check if initializer_token is a single token or a sequence of tokens
if len(token_ids) > 1:
raise ValueError("The initializer token must be a single token.")
initializer_token_id = token_ids[0]
placeholder_token_ids = tokenizer.convert_tokens_to_ids(placeholder_tokens)
# Resize the token embeddings as we are adding new special tokens to the tokenizer
text_encoder.resize_token_embeddings(len(tokenizer))
# Initialise the newly added placeholder token with the embeddings of the initializer token
token_embeds = text_encoder.get_input_embeddings().weight.data
with torch.no_grad():
for token_id in placeholder_token_ids:
token_embeds[token_id] = token_embeds[initializer_token_id].clone()
# Freeze vae and unet
vae.requires_grad_(False)
unet.requires_grad_(False)
# Freeze all parameters except for the token embeddings in text encoder
text_encoder.text_model.encoder.requires_grad_(False)
text_encoder.text_model.final_layer_norm.requires_grad_(False)
text_encoder.text_model.embeddings.position_embedding.requires_grad_(False)
if args.gradient_checkpointing:
# Keep unet in train mode if we are using gradient checkpointing to save memory.
# Dropout is always 0 in these models, so it does not matter whether we are in eval or train mode.
unet.train()
text_encoder.gradient_checkpointing_enable()
unet.enable_gradient_checkpointing()
if args.enable_xformers_memory_efficient_attention:
if is_xformers_available():
import xformers
xformers_version = version.parse(xformers.__version__)
if xformers_version == version.parse("0.0.16"):
logger.warning(
"xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
)
unet.enable_xformers_memory_efficient_attention()
else:
raise ValueError("xformers is not available. Make sure it is installed correctly")
# Enable TF32 for faster training on Ampere GPUs,
# cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
if args.allow_tf32:
torch.backends.cuda.matmul.allow_tf32 = True
if args.scale_lr:
args.learning_rate = (
args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
)
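# Illustrative arithmetic (hypothetical values): with a base learning rate of 1e-4,
# gradient_accumulation_steps=4, train_batch_size=8 and 2 processes, the scaled
# learning rate becomes 1e-4 * 4 * 8 * 2 = 6.4e-3.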
# Initialize the optimizer
optimizer = torch.optim.AdamW(
text_encoder.get_input_embeddings().parameters(), # only optimize the embeddings
lr=args.learning_rate,
betas=(args.adam_beta1, args.adam_beta2),
weight_decay=args.adam_weight_decay,
eps=args.adam_epsilon,
)
# Dataset and DataLoaders creation:
train_dataset = TextualInversionDataset(
data_root=args.train_data_dir,
tokenizer=tokenizer,
size=args.resolution,
placeholder_token=(" ".join(tokenizer.convert_ids_to_tokens(placeholder_token_ids))),
repeats=args.repeats,
learnable_property=args.learnable_property,
center_crop=args.center_crop,
set="train",
)
train_dataloader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.train_batch_size, shuffle=True, num_workers=args.dataloader_num_workers
)
if args.validation_epochs is not None:
warnings.warn(
f"FutureWarning: You are doing logging with validation_epochs={args.validation_epochs}."
" Deprecated validation_epochs in favor of `validation_steps`"
f"Setting `args.validation_steps` to {args.validation_epochs * len(train_dataset)}",
FutureWarning,
stacklevel=2,
)
args.validation_steps = args.validation_epochs * len(train_dataset)
# Scheduler and math around the number of training steps.
overrode_max_train_steps = False
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
overrode_max_train_steps = True
lr_scheduler = get_scheduler(
args.lr_scheduler,
optimizer=optimizer,
num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
num_training_steps=args.max_train_steps * accelerator.num_processes,
num_cycles=args.lr_num_cycles,
)
text_encoder.train()
# Prepare everything with our `accelerator`.
text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
text_encoder, optimizer, train_dataloader, lr_scheduler
)
# For mixed precision training we cast all non-trainable weights (vae, non-lora text_encoder and non-lora unet) to half-precision
# as these weights are only used for inference, keeping weights in full precision is not required.
weight_dtype = torch.float32
if accelerator.mixed_precision == "fp16":
weight_dtype = torch.float16
elif accelerator.mixed_precision == "bf16":
weight_dtype = torch.bfloat16
# Move vae and unet to device and cast to weight_dtype
unet.to(accelerator.device, dtype=weight_dtype)
vae.to(accelerator.device, dtype=weight_dtype)
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if overrode_max_train_steps:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
# Afterwards we recalculate our number of training epochs
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
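# Worked example (hypothetical numbers): 1000 batches per epoch with gradient_accumulation_steps=4
# gives ceil(1000 / 4) = 250 update steps per epoch; with max_train_steps=3000 this yields
# num_train_epochs = ceil(3000 / 250) = 12.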
# We need to initialize the trackers we use, and also store our configuration.
# The trackers are initialized automatically on the main process.
if accelerator.is_main_process:
accelerator.init_trackers("textual_inversion", config=vars(args))
# Train!
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
global_step = 0
first_epoch = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint != "latest":
path = os.path.basename(args.resume_from_checkpoint)
else:
# Get the most recent checkpoint
dirs = os.listdir(args.output_dir)
dirs = [d for d in dirs if d.startswith("checkpoint")]
dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
path = dirs[-1] if len(dirs) > 0 else None
if path is None:
accelerator.print(
f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
)
args.resume_from_checkpoint = None
initial_global_step = 0
else:
accelerator.print(f"Resuming from checkpoint {path}")
accelerator.load_state(os.path.join(args.output_dir, path))
global_step = int(path.split("-")[1])
initial_global_step = global_step
first_epoch = global_step // num_update_steps_per_epoch
else:
initial_global_step = 0
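# e.g. (hypothetical) resuming from "checkpoint-1500" with 250 update steps per epoch gives
# global_step = 1500 and first_epoch = 1500 // 250 = 6.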
progress_bar = tqdm(
range(0, args.max_train_steps),
initial=initial_global_step,
desc="Steps",
# Only show the progress bar once on each machine.
disable=not accelerator.is_local_main_process,
)
# keep original embeddings as reference
orig_embeds_params = accelerator.unwrap_model(text_encoder).get_input_embeddings().weight.data.clone()
for epoch in range(first_epoch, args.num_train_epochs):
text_encoder.train()
for step, batch in enumerate(train_dataloader):
with accelerator.accumulate(text_encoder):
# Convert images to latent space
latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample().detach()
latents = latents * vae.config.scaling_factor
# Sample noise that we'll add to the latents
noise = torch.randn_like(latents)
bsz = latents.shape[0]
# Sample a random timestep for each image
timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
timesteps = timesteps.long()
# Add noise to the latents according to the noise magnitude at each timestep
# (this is the forward diffusion process)
noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
# Get the text embedding for conditioning
encoder_hidden_states = text_encoder(batch["input_ids"])[0].to(dtype=weight_dtype)
# Predict the noise residual
model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample
# Get the target for loss depending on the prediction type
if noise_scheduler.config.prediction_type == "epsilon":
target = noise
elif noise_scheduler.config.prediction_type == "v_prediction":
target = noise_scheduler.get_velocity(latents, noise, timesteps)
else:
raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
accelerator.backward(loss)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# Let's make sure we don't update any embedding weights besides the newly added token
index_no_updates = torch.ones((len(tokenizer),), dtype=torch.bool)
index_no_updates[min(placeholder_token_ids) : max(placeholder_token_ids) + 1] = False
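# Note: the slice above relies on the placeholder token ids being contiguous, which holds here
# because the placeholder tokens were appended to the tokenizer in a single add_tokens() call.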
with torch.no_grad():
accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[
index_no_updates
] = orig_embeds_params[index_no_updates]
# Checks if the accelerator has performed an optimization step behind the scenes
if accelerator.sync_gradients:
images = []
progress_bar.update(1)
global_step += 1
if global_step % args.save_steps == 0:
weight_name = (
f"learned_embeds-steps-{global_step}.bin"
if args.no_safe_serialization
else f"learned_embeds-steps-{global_step}.safetensors"
)
save_path = os.path.join(args.output_dir, weight_name)
save_progress(
text_encoder,
placeholder_token_ids,
accelerator,
args,
save_path,
safe_serialization=not args.no_safe_serialization,
)
if accelerator.is_main_process:
if global_step % args.checkpointing_steps == 0:
# _before_ saving state, check if this save would set us over the `checkpoints_total_limit`
if args.checkpoints_total_limit is not None:
checkpoints = os.listdir(args.output_dir)
checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))
# before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints
if len(checkpoints) >= args.checkpoints_total_limit:
num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
removing_checkpoints = checkpoints[0:num_to_remove]
logger.info(
f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints"
)
logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")
for removing_checkpoint in removing_checkpoints:
removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
shutil.rmtree(removing_checkpoint)
save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
accelerator.save_state(save_path)
logger.info(f"Saved state to {save_path}")
if args.validation_prompt is not None and global_step % args.validation_steps == 0:
images = log_validation(
text_encoder, tokenizer, unet, vae, args, accelerator, weight_dtype, epoch
)
logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
progress_bar.set_postfix(**logs)
accelerator.log(logs, step=global_step)
if global_step >= args.max_train_steps:
break
# Create the pipeline using the trained modules and save it.
accelerator.wait_for_everyone()
if accelerator.is_main_process:
if args.push_to_hub and not args.save_as_full_pipeline:
logger.warning("Enabling full model saving because --push_to_hub=True was specified.")
save_full_model = True
else:
save_full_model = args.save_as_full_pipeline
if save_full_model:
pipeline = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path,
text_encoder=accelerator.unwrap_model(text_encoder),
vae=vae,
unet=unet,
tokenizer=tokenizer,
)
pipeline.save_pretrained(args.output_dir)
# Save the newly trained embeddings
weight_name = "learned_embeds.bin" if args.no_safe_serialization else "learned_embeds.safetensors"
save_path = os.path.join(args.output_dir, weight_name)
save_progress(
text_encoder,
placeholder_token_ids,
accelerator,
args,
save_path,
safe_serialization=not args.no_safe_serialization,
)
if args.push_to_hub:
save_model_card(
repo_id,
images=images,
base_model=args.pretrained_model_name_or_path,
repo_folder=args.output_dir,
)
upload_folder(
repo_id=repo_id,
folder_path=args.output_dir,
commit_message="End of training",
ignore_patterns=["step_*", "epoch_*"],
)
accelerator.end_training()
if __name__ == "__main__":
main()
| diffusers/examples/textual_inversion/textual_inversion.py/0 | {
"file_path": "diffusers/examples/textual_inversion/textual_inversion.py",
"repo_id": "diffusers",
"token_count": 17305
} | 118 |
import argparse
import torch
import yaml
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
with open(config_path, "r") as f:
    config = yaml.safe_load(f)
state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
keys = list(state_dict.keys())
# extract state_dict for VQVAE
first_stage_dict = {}
first_stage_key = "first_stage_model."
for key in keys:
if key.startswith(first_stage_key):
first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]
# extract state_dict for UNetLDM
unet_state_dict = {}
unet_key = "model.diffusion_model."
for key in keys:
if key.startswith(unet_key):
unet_state_dict[key.replace(unet_key, "")] = state_dict[key]
vqvae_init_args = config["model"]["params"]["first_stage_config"]["params"]
unet_init_args = config["model"]["params"]["unet_config"]["params"]
vqvae = VQModel(**vqvae_init_args).eval()
vqvae.load_state_dict(first_stage_dict)
unet = UNetLDMModel(**unet_init_args).eval()
unet.load_state_dict(unet_state_dict)
noise_scheduler = DDIMScheduler(
timesteps=config["model"]["params"]["timesteps"],
beta_schedule="scaled_linear",
beta_start=config["model"]["params"]["linear_start"],
beta_end=config["model"]["params"]["linear_end"],
clip_sample=False,
)
pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
pipeline.save_pretrained(output_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", type=str, required=True)
parser.add_argument("--config_path", type=str, required=True)
parser.add_argument("--output_path", type=str, required=True)
args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
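# Example invocation (hypothetical paths):
#   python conversion_ldm_uncond.py --checkpoint_path /path/to/model.ckpt \
#       --config_path /path/to/config.yaml --output_path ./ldm-pipeline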
| diffusers/scripts/conversion_ldm_uncond.py/0 | {
"file_path": "diffusers/scripts/conversion_ldm_uncond.py",
"repo_id": "diffusers",
"token_count": 793
} | 119 |
import argparse
import inspect
import os
import numpy as np
import torch
import yaml
from torch.nn import functional as F
from transformers import CLIPConfig, CLIPImageProcessor, CLIPVisionModelWithProjection, T5EncoderModel, T5Tokenizer
from diffusers import DDPMScheduler, IFPipeline, IFSuperResolutionPipeline, UNet2DConditionModel
from diffusers.pipelines.deepfloyd_if.safety_checker import IFSafetyChecker
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--dump_path", required=False, default=None, type=str)
parser.add_argument("--dump_path_stage_2", required=False, default=None, type=str)
parser.add_argument("--dump_path_stage_3", required=False, default=None, type=str)
parser.add_argument("--unet_config", required=False, default=None, type=str, help="Path to unet config file")
parser.add_argument(
"--unet_checkpoint_path", required=False, default=None, type=str, help="Path to unet checkpoint file"
)
parser.add_argument(
"--unet_checkpoint_path_stage_2",
required=False,
default=None,
type=str,
help="Path to stage 2 unet checkpoint file",
)
parser.add_argument(
"--unet_checkpoint_path_stage_3",
required=False,
default=None,
type=str,
help="Path to stage 3 unet checkpoint file",
)
parser.add_argument("--p_head_path", type=str, required=True)
parser.add_argument("--w_head_path", type=str, required=True)
args = parser.parse_args()
return args
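# Example invocation for converting only stage 1 (hypothetical paths and file names):
#   python convert_if.py --unet_config /path/to/stage1_config.yml \
#       --unet_checkpoint_path /path/to/stage1_checkpoint.pt --dump_path ./IF-I \
#       --p_head_path /path/to/p_head.npz --w_head_path /path/to/w_head.npz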
def main(args):
tokenizer = T5Tokenizer.from_pretrained("google/t5-v1_1-xxl")
text_encoder = T5EncoderModel.from_pretrained("google/t5-v1_1-xxl")
feature_extractor = CLIPImageProcessor.from_pretrained("openai/clip-vit-large-patch14")
safety_checker = convert_safety_checker(p_head_path=args.p_head_path, w_head_path=args.w_head_path)
if args.unet_config is not None and args.unet_checkpoint_path is not None and args.dump_path is not None:
convert_stage_1_pipeline(tokenizer, text_encoder, feature_extractor, safety_checker, args)
if args.unet_checkpoint_path_stage_2 is not None and args.dump_path_stage_2 is not None:
convert_super_res_pipeline(tokenizer, text_encoder, feature_extractor, safety_checker, args, stage=2)
if args.unet_checkpoint_path_stage_3 is not None and args.dump_path_stage_3 is not None:
convert_super_res_pipeline(tokenizer, text_encoder, feature_extractor, safety_checker, args, stage=3)
def convert_stage_1_pipeline(tokenizer, text_encoder, feature_extractor, safety_checker, args):
unet = get_stage_1_unet(args.unet_config, args.unet_checkpoint_path)
scheduler = DDPMScheduler(
variance_type="learned_range",
beta_schedule="squaredcos_cap_v2",
prediction_type="epsilon",
thresholding=True,
dynamic_thresholding_ratio=0.95,
sample_max_value=1.5,
)
pipe = IFPipeline(
tokenizer=tokenizer,
text_encoder=text_encoder,
unet=unet,
scheduler=scheduler,
safety_checker=safety_checker,
feature_extractor=feature_extractor,
requires_safety_checker=True,
)
pipe.save_pretrained(args.dump_path)
def convert_super_res_pipeline(tokenizer, text_encoder, feature_extractor, safety_checker, args, stage):
if stage == 2:
unet_checkpoint_path = args.unet_checkpoint_path_stage_2
sample_size = None
dump_path = args.dump_path_stage_2
elif stage == 3:
unet_checkpoint_path = args.unet_checkpoint_path_stage_3
sample_size = 1024
dump_path = args.dump_path_stage_3
else:
assert False
unet = get_super_res_unet(unet_checkpoint_path, check_param_count=False, sample_size=sample_size)
image_noising_scheduler = DDPMScheduler(
beta_schedule="squaredcos_cap_v2",
)
scheduler = DDPMScheduler(
variance_type="learned_range",
beta_schedule="squaredcos_cap_v2",
prediction_type="epsilon",
thresholding=True,
dynamic_thresholding_ratio=0.95,
sample_max_value=1.0,
)
pipe = IFSuperResolutionPipeline(
tokenizer=tokenizer,
text_encoder=text_encoder,
unet=unet,
scheduler=scheduler,
image_noising_scheduler=image_noising_scheduler,
safety_checker=safety_checker,
feature_extractor=feature_extractor,
requires_safety_checker=True,
)
pipe.save_pretrained(dump_path)
def get_stage_1_unet(unet_config, unet_checkpoint_path):
with open(unet_config, "r") as f:
    original_unet_config = yaml.safe_load(f)
original_unet_config = original_unet_config["params"]
unet_diffusers_config = create_unet_diffusers_config(original_unet_config)
unet = UNet2DConditionModel(**unet_diffusers_config)
device = "cuda" if torch.cuda.is_available() else "cpu"
unet_checkpoint = torch.load(unet_checkpoint_path, map_location=device)
converted_unet_checkpoint = convert_ldm_unet_checkpoint(
unet_checkpoint, unet_diffusers_config, path=unet_checkpoint_path
)
unet.load_state_dict(converted_unet_checkpoint)
return unet
def convert_safety_checker(p_head_path, w_head_path):
state_dict = {}
# p head
p_head = np.load(p_head_path)
p_head_weights = p_head["weights"]
p_head_weights = torch.from_numpy(p_head_weights)
p_head_weights = p_head_weights.unsqueeze(0)
p_head_biases = p_head["biases"]
p_head_biases = torch.from_numpy(p_head_biases)
p_head_biases = p_head_biases.unsqueeze(0)
state_dict["p_head.weight"] = p_head_weights
state_dict["p_head.bias"] = p_head_biases
# w head
w_head = np.load(w_head_path)
w_head_weights = w_head["weights"]
w_head_weights = torch.from_numpy(w_head_weights)
w_head_weights = w_head_weights.unsqueeze(0)
w_head_biases = w_head["biases"]
w_head_biases = torch.from_numpy(w_head_biases)
w_head_biases = w_head_biases.unsqueeze(0)
state_dict["w_head.weight"] = w_head_weights
state_dict["w_head.bias"] = w_head_biases
# vision model
vision_model = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")
vision_model_state_dict = vision_model.state_dict()
for key, value in vision_model_state_dict.items():
key = f"vision_model.{key}"
state_dict[key] = value
# full model
config = CLIPConfig.from_pretrained("openai/clip-vit-large-patch14")
safety_checker = IFSafetyChecker(config)
safety_checker.load_state_dict(state_dict)
return safety_checker
def create_unet_diffusers_config(original_unet_config, class_embed_type=None):
attention_resolutions = parse_list(original_unet_config["attention_resolutions"])
attention_resolutions = [original_unet_config["image_size"] // int(res) for res in attention_resolutions]
channel_mult = parse_list(original_unet_config["channel_mult"])
block_out_channels = [original_unet_config["model_channels"] * mult for mult in channel_mult]
down_block_types = []
resolution = 1
for i in range(len(block_out_channels)):
if resolution in attention_resolutions:
block_type = "SimpleCrossAttnDownBlock2D"
elif original_unet_config["resblock_updown"]:
block_type = "ResnetDownsampleBlock2D"
else:
block_type = "DownBlock2D"
down_block_types.append(block_type)
if i != len(block_out_channels) - 1:
resolution *= 2
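# Hypothetical example: with 4 levels, resblock_updown=True and attention enabled at downsample
# factors {2, 4}, this yields ["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D",
# "SimpleCrossAttnDownBlock2D", "ResnetDownsampleBlock2D"].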
up_block_types = []
for i in range(len(block_out_channels)):
if resolution in attention_resolutions:
block_type = "SimpleCrossAttnUpBlock2D"
elif original_unet_config["resblock_updown"]:
block_type = "ResnetUpsampleBlock2D"
else:
block_type = "UpBlock2D"
up_block_types.append(block_type)
resolution //= 2
head_dim = original_unet_config["num_head_channels"]
use_linear_projection = (
original_unet_config["use_linear_in_transformer"]
if "use_linear_in_transformer" in original_unet_config
else False
)
if use_linear_projection:
# stable diffusion 2-base-512 and 2-768
if head_dim is None:
head_dim = [5, 10, 20, 20]
projection_class_embeddings_input_dim = None
if class_embed_type is None:
if "num_classes" in original_unet_config:
if original_unet_config["num_classes"] == "sequential":
class_embed_type = "projection"
assert "adm_in_channels" in original_unet_config
projection_class_embeddings_input_dim = original_unet_config["adm_in_channels"]
else:
raise NotImplementedError(
f"Unknown conditional unet num_classes config: {original_unet_config['num_classes']}"
)
config = {
"sample_size": original_unet_config["image_size"],
"in_channels": original_unet_config["in_channels"],
"down_block_types": tuple(down_block_types),
"block_out_channels": tuple(block_out_channels),
"layers_per_block": original_unet_config["num_res_blocks"],
"cross_attention_dim": original_unet_config["encoder_channels"],
"attention_head_dim": head_dim,
"use_linear_projection": use_linear_projection,
"class_embed_type": class_embed_type,
"projection_class_embeddings_input_dim": projection_class_embeddings_input_dim,
"out_channels": original_unet_config["out_channels"],
"up_block_types": tuple(up_block_types),
"upcast_attention": False, # TODO: guessing
"cross_attention_norm": "group_norm",
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"addition_embed_type": "text",
"act_fn": "gelu",
}
if original_unet_config["use_scale_shift_norm"]:
config["resnet_time_scale_shift"] = "scale_shift"
if "encoder_dim" in original_unet_config:
config["encoder_hid_dim"] = original_unet_config["encoder_dim"]
return config
def convert_ldm_unet_checkpoint(unet_state_dict, config, path=None):
"""
Takes a state dict and a config, and returns a converted checkpoint.
"""
new_checkpoint = {}
new_checkpoint["time_embedding.linear_1.weight"] = unet_state_dict["time_embed.0.weight"]
new_checkpoint["time_embedding.linear_1.bias"] = unet_state_dict["time_embed.0.bias"]
new_checkpoint["time_embedding.linear_2.weight"] = unet_state_dict["time_embed.2.weight"]
new_checkpoint["time_embedding.linear_2.bias"] = unet_state_dict["time_embed.2.bias"]
if config["class_embed_type"] in [None, "identity"]:
# No parameters to port
...
elif config["class_embed_type"] == "timestep" or config["class_embed_type"] == "projection":
new_checkpoint["class_embedding.linear_1.weight"] = unet_state_dict["label_emb.0.0.weight"]
new_checkpoint["class_embedding.linear_1.bias"] = unet_state_dict["label_emb.0.0.bias"]
new_checkpoint["class_embedding.linear_2.weight"] = unet_state_dict["label_emb.0.2.weight"]
new_checkpoint["class_embedding.linear_2.bias"] = unet_state_dict["label_emb.0.2.bias"]
else:
raise NotImplementedError(f"Not implemented `class_embed_type`: {config['class_embed_type']}")
new_checkpoint["conv_in.weight"] = unet_state_dict["input_blocks.0.0.weight"]
new_checkpoint["conv_in.bias"] = unet_state_dict["input_blocks.0.0.bias"]
new_checkpoint["conv_norm_out.weight"] = unet_state_dict["out.0.weight"]
new_checkpoint["conv_norm_out.bias"] = unet_state_dict["out.0.bias"]
new_checkpoint["conv_out.weight"] = unet_state_dict["out.2.weight"]
new_checkpoint["conv_out.bias"] = unet_state_dict["out.2.bias"]
# Retrieves the keys for the input blocks only
num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "input_blocks" in layer})
input_blocks = {
layer_id: [key for key in unet_state_dict if f"input_blocks.{layer_id}." in key]
for layer_id in range(num_input_blocks)
}
# Retrieves the keys for the middle blocks only
num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "middle_block" in layer})
middle_blocks = {
layer_id: [key for key in unet_state_dict if f"middle_block.{layer_id}" in key]
for layer_id in range(num_middle_blocks)
}
# Retrieves the keys for the output blocks only
num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "output_blocks" in layer})
output_blocks = {
layer_id: [key for key in unet_state_dict if f"output_blocks.{layer_id}." in key]
for layer_id in range(num_output_blocks)
}
for i in range(1, num_input_blocks):
block_id = (i - 1) // (config["layers_per_block"] + 1)
layer_in_block_id = (i - 1) % (config["layers_per_block"] + 1)
resnets = [
key for key in input_blocks[i] if f"input_blocks.{i}.0" in key and f"input_blocks.{i}.0.op" not in key
]
attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]
if f"input_blocks.{i}.0.op.weight" in unet_state_dict:
new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = unet_state_dict.pop(
f"input_blocks.{i}.0.op.weight"
)
new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = unet_state_dict.pop(
f"input_blocks.{i}.0.op.bias"
)
paths = renew_resnet_paths(resnets)
# TODO need better check than i in [4, 8, 12, 16]
block_type = config["down_block_types"][block_id]
if (block_type == "ResnetDownsampleBlock2D" or block_type == "SimpleCrossAttnDownBlock2D") and i in [
4,
8,
12,
16,
]:
meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.downsamplers.0"}
else:
meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
assign_to_checkpoint(
paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
)
if len(attentions):
old_path = f"input_blocks.{i}.1"
new_path = f"down_blocks.{block_id}.attentions.{layer_in_block_id}"
assign_attention_to_checkpoint(
new_checkpoint=new_checkpoint,
unet_state_dict=unet_state_dict,
old_path=old_path,
new_path=new_path,
config=config,
)
paths = renew_attention_paths(attentions)
meta_path = {"old": old_path, "new": new_path}
assign_to_checkpoint(
paths,
new_checkpoint,
unet_state_dict,
additional_replacements=[meta_path],
config=config,
)
resnet_0 = middle_blocks[0]
attentions = middle_blocks[1]
resnet_1 = middle_blocks[2]
resnet_0_paths = renew_resnet_paths(resnet_0)
assign_to_checkpoint(resnet_0_paths, new_checkpoint, unet_state_dict, config=config)
resnet_1_paths = renew_resnet_paths(resnet_1)
assign_to_checkpoint(resnet_1_paths, new_checkpoint, unet_state_dict, config=config)
old_path = "middle_block.1"
new_path = "mid_block.attentions.0"
assign_attention_to_checkpoint(
new_checkpoint=new_checkpoint,
unet_state_dict=unet_state_dict,
old_path=old_path,
new_path=new_path,
config=config,
)
attentions_paths = renew_attention_paths(attentions)
meta_path = {"old": "middle_block.1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(
attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
)
for i in range(num_output_blocks):
block_id = i // (config["layers_per_block"] + 1)
layer_in_block_id = i % (config["layers_per_block"] + 1)
output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
output_block_list = {}
for layer in output_block_layers:
layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
if layer_id in output_block_list:
output_block_list[layer_id].append(layer_name)
else:
output_block_list[layer_id] = [layer_name]
# len(output_block_list) == 1 -> resnet
# len(output_block_list) == 2 -> resnet, attention
# len(output_block_list) == 3 -> resnet, attention, upscale resnet
if len(output_block_list) > 1:
resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]
paths = renew_resnet_paths(resnets)
meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
assign_to_checkpoint(
paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
)
output_block_list = {k: sorted(v) for k, v in output_block_list.items()}
if ["conv.bias", "conv.weight"] in output_block_list.values():
index = list(output_block_list.values()).index(["conv.bias", "conv.weight"])
new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = unet_state_dict[
f"output_blocks.{i}.{index}.conv.weight"
]
new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = unet_state_dict[
f"output_blocks.{i}.{index}.conv.bias"
]
# Clear attentions as they have been attributed above.
if len(attentions) == 2:
attentions = []
if len(attentions):
old_path = f"output_blocks.{i}.1"
new_path = f"up_blocks.{block_id}.attentions.{layer_in_block_id}"
assign_attention_to_checkpoint(
new_checkpoint=new_checkpoint,
unet_state_dict=unet_state_dict,
old_path=old_path,
new_path=new_path,
config=config,
)
paths = renew_attention_paths(attentions)
meta_path = {
"old": old_path,
"new": new_path,
}
assign_to_checkpoint(
paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
)
if len(output_block_list) == 3:
resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.2" in key]
paths = renew_resnet_paths(resnets)
meta_path = {"old": f"output_blocks.{i}.2", "new": f"up_blocks.{block_id}.upsamplers.0"}
assign_to_checkpoint(
paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
)
else:
resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
for path in resnet_0_paths:
old_path = ".".join(["output_blocks", str(i), path["old"]])
new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])
new_checkpoint[new_path] = unet_state_dict[old_path]
if "encoder_proj.weight" in unet_state_dict:
new_checkpoint["encoder_hid_proj.weight"] = unet_state_dict.pop("encoder_proj.weight")
new_checkpoint["encoder_hid_proj.bias"] = unet_state_dict.pop("encoder_proj.bias")
if "encoder_pooling.0.weight" in unet_state_dict:
new_checkpoint["add_embedding.norm1.weight"] = unet_state_dict.pop("encoder_pooling.0.weight")
new_checkpoint["add_embedding.norm1.bias"] = unet_state_dict.pop("encoder_pooling.0.bias")
new_checkpoint["add_embedding.pool.positional_embedding"] = unet_state_dict.pop(
"encoder_pooling.1.positional_embedding"
)
new_checkpoint["add_embedding.pool.k_proj.weight"] = unet_state_dict.pop("encoder_pooling.1.k_proj.weight")
new_checkpoint["add_embedding.pool.k_proj.bias"] = unet_state_dict.pop("encoder_pooling.1.k_proj.bias")
new_checkpoint["add_embedding.pool.q_proj.weight"] = unet_state_dict.pop("encoder_pooling.1.q_proj.weight")
new_checkpoint["add_embedding.pool.q_proj.bias"] = unet_state_dict.pop("encoder_pooling.1.q_proj.bias")
new_checkpoint["add_embedding.pool.v_proj.weight"] = unet_state_dict.pop("encoder_pooling.1.v_proj.weight")
new_checkpoint["add_embedding.pool.v_proj.bias"] = unet_state_dict.pop("encoder_pooling.1.v_proj.bias")
new_checkpoint["add_embedding.proj.weight"] = unet_state_dict.pop("encoder_pooling.2.weight")
new_checkpoint["add_embedding.proj.bias"] = unet_state_dict.pop("encoder_pooling.2.bias")
new_checkpoint["add_embedding.norm2.weight"] = unet_state_dict.pop("encoder_pooling.3.weight")
new_checkpoint["add_embedding.norm2.bias"] = unet_state_dict.pop("encoder_pooling.3.bias")
return new_checkpoint
def shave_segments(path, n_shave_prefix_segments=1):
"""
Removes segments. Positive values shave the first segments, negative shave the last segments.
"""
if n_shave_prefix_segments >= 0:
return ".".join(path.split(".")[n_shave_prefix_segments:])
else:
return ".".join(path.split(".")[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
"""
Updates paths inside resnets to the new naming scheme (local renaming)
"""
mapping = []
for old_item in old_list:
new_item = old_item.replace("in_layers.0", "norm1")
new_item = new_item.replace("in_layers.2", "conv1")
new_item = new_item.replace("out_layers.0", "norm2")
new_item = new_item.replace("out_layers.3", "conv2")
new_item = new_item.replace("emb_layers.1", "time_emb_proj")
new_item = new_item.replace("skip_connection", "conv_shortcut")
new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
mapping.append({"old": old_item, "new": new_item})
return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
"""
Updates paths inside attentions to the new naming scheme (local renaming)
"""
mapping = []
for old_item in old_list:
new_item = old_item
if "qkv" in new_item:
continue
if "encoder_kv" in new_item:
continue
new_item = new_item.replace("norm.weight", "group_norm.weight")
new_item = new_item.replace("norm.bias", "group_norm.bias")
new_item = new_item.replace("proj_out.weight", "to_out.0.weight")
new_item = new_item.replace("proj_out.bias", "to_out.0.bias")
new_item = new_item.replace("norm_encoder.weight", "norm_cross.weight")
new_item = new_item.replace("norm_encoder.bias", "norm_cross.bias")
new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
mapping.append({"old": old_item, "new": new_item})
return mapping
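# Illustrative example (hypothetical key): "middle_block.1.proj_out.weight" is locally renamed to
# "middle_block.1.to_out.0.weight" here; assign_to_checkpoint() then applies the global
# "middle_block.1" -> "mid_block.attentions.0" replacement. Fused "qkv" / "encoder_kv" keys are
# skipped because assign_attention_to_checkpoint() handles them separately.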
def assign_attention_to_checkpoint(new_checkpoint, unet_state_dict, old_path, new_path, config):
qkv_weight = unet_state_dict.pop(f"{old_path}.qkv.weight")
qkv_weight = qkv_weight[:, :, 0]
qkv_bias = unet_state_dict.pop(f"{old_path}.qkv.bias")
is_cross_attn_only = "only_cross_attention" in config and config["only_cross_attention"]
split = 1 if is_cross_attn_only else 3
weights, bias = split_attentions(
weight=qkv_weight,
bias=qkv_bias,
split=split,
chunk_size=config["attention_head_dim"],
)
if is_cross_attn_only:
query_weight, q_bias = weights, bias
new_checkpoint[f"{new_path}.to_q.weight"] = query_weight[0]
new_checkpoint[f"{new_path}.to_q.bias"] = q_bias[0]
else:
[query_weight, key_weight, value_weight], [q_bias, k_bias, v_bias] = weights, bias
new_checkpoint[f"{new_path}.to_q.weight"] = query_weight
new_checkpoint[f"{new_path}.to_q.bias"] = q_bias
new_checkpoint[f"{new_path}.to_k.weight"] = key_weight
new_checkpoint[f"{new_path}.to_k.bias"] = k_bias
new_checkpoint[f"{new_path}.to_v.weight"] = value_weight
new_checkpoint[f"{new_path}.to_v.bias"] = v_bias
encoder_kv_weight = unet_state_dict.pop(f"{old_path}.encoder_kv.weight")
encoder_kv_weight = encoder_kv_weight[:, :, 0]
encoder_kv_bias = unet_state_dict.pop(f"{old_path}.encoder_kv.bias")
[encoder_k_weight, encoder_v_weight], [encoder_k_bias, encoder_v_bias] = split_attentions(
weight=encoder_kv_weight,
bias=encoder_kv_bias,
split=2,
chunk_size=config["attention_head_dim"],
)
new_checkpoint[f"{new_path}.add_k_proj.weight"] = encoder_k_weight
new_checkpoint[f"{new_path}.add_k_proj.bias"] = encoder_k_bias
new_checkpoint[f"{new_path}.add_v_proj.weight"] = encoder_v_weight
new_checkpoint[f"{new_path}.add_v_proj.bias"] = encoder_v_bias
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, additional_replacements=None, config=None):
"""
This does the final conversion step: take locally converted weights and apply a global renaming to them. It splits
attention layers, and takes into account additional replacements that may arise.
Assigns the weights to the new checkpoint.
"""
assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."
for path in paths:
new_path = path["new"]
# Global renaming happens here
new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")
if additional_replacements is not None:
for replacement in additional_replacements:
new_path = new_path.replace(replacement["old"], replacement["new"])
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path or "to_out.0.weight" in new_path:
checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
else:
checkpoint[new_path] = old_checkpoint[path["old"]]
# TODO maybe document and/or can do more efficiently (build indices in for loop and extract once for each split?)
def split_attentions(*, weight, bias, split, chunk_size):
weights = [None] * split
biases = [None] * split
weights_biases_idx = 0
for starting_row_index in range(0, weight.shape[0], chunk_size):
row_indices = torch.arange(starting_row_index, starting_row_index + chunk_size)
weight_rows = weight[row_indices, :]
bias_rows = bias[row_indices]
if weights[weights_biases_idx] is None:
weights[weights_biases_idx] = weight_rows
biases[weights_biases_idx] = bias_rows
else:
assert weights[weights_biases_idx] is not None
weights[weights_biases_idx] = torch.concat([weights[weights_biases_idx], weight_rows])
biases[weights_biases_idx] = torch.concat([biases[weights_biases_idx], bias_rows])
weights_biases_idx = (weights_biases_idx + 1) % split
return weights, biases
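# Sketch of the intended usage (hypothetical names/shapes): a fused qkv projection stores its rows
# per head as [q_head0, k_head0, v_head0, q_head1, ...], so walking chunk_size (= head_dim) rows at
# a time and distributing them round-robin over `split` buckets regroups all q, k and v rows:
#   weights, biases = split_attentions(weight=qkv_weight, bias=qkv_bias, split=3, chunk_size=head_dim)
#   to_q_w, to_k_w, to_v_w = weights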
def parse_list(value):
if isinstance(value, str):
value = value.split(",")
value = [int(v) for v in value]
elif isinstance(value, list):
pass
else:
raise ValueError(f"Can't parse list for type: {type(value)}")
return value
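# e.g. parse_list("32,16,8") -> [32, 16, 8]; an already-parsed list is returned unchanged.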
# The code below is copied from the original convert_if_stage_2.py script
def get_super_res_unet(unet_checkpoint_path, check_param_count=True, sample_size=None):
orig_path = unet_checkpoint_path
with open(os.path.join(orig_path, "config.yml"), "r") as f:
    original_unet_config = yaml.safe_load(f)
original_unet_config = original_unet_config["params"]
unet_diffusers_config = superres_create_unet_diffusers_config(original_unet_config)
unet_diffusers_config["time_embedding_dim"] = original_unet_config["model_channels"] * int(
original_unet_config["channel_mult"].split(",")[-1]
)
if original_unet_config["encoder_dim"] != original_unet_config["encoder_channels"]:
unet_diffusers_config["encoder_hid_dim"] = original_unet_config["encoder_dim"]
unet_diffusers_config["class_embed_type"] = "timestep"
unet_diffusers_config["addition_embed_type"] = "text"
unet_diffusers_config["time_embedding_act_fn"] = "gelu"
unet_diffusers_config["resnet_skip_time_act"] = True
unet_diffusers_config["resnet_out_scale_factor"] = 1 / 0.7071
unet_diffusers_config["mid_block_scale_factor"] = 1 / 0.7071
unet_diffusers_config["only_cross_attention"] = (
bool(original_unet_config["disable_self_attentions"])
if (
"disable_self_attentions" in original_unet_config
and isinstance(original_unet_config["disable_self_attentions"], int)
)
else True
)
if sample_size is None:
unet_diffusers_config["sample_size"] = original_unet_config["image_size"]
else:
# The second upscaler unet's sample size is incorrectly specified
# in the config and is instead hardcoded in source
unet_diffusers_config["sample_size"] = sample_size
unet_checkpoint = torch.load(os.path.join(unet_checkpoint_path, "pytorch_model.bin"), map_location="cpu")
if check_param_count:
    # check that the architecture matches - this is a bit slow
    verify_param_count(orig_path, unet_diffusers_config)
converted_unet_checkpoint = superres_convert_ldm_unet_checkpoint(
unet_checkpoint, unet_diffusers_config, path=unet_checkpoint_path
)
converted_keys = converted_unet_checkpoint.keys()
model = UNet2DConditionModel(**unet_diffusers_config)
expected_weights = model.state_dict().keys()
diff_c_e = set(converted_keys) - set(expected_weights)
diff_e_c = set(expected_weights) - set(converted_keys)
assert len(diff_e_c) == 0, f"Expected, but not converted: {diff_e_c}"
assert len(diff_c_e) == 0, f"Converted, but not expected: {diff_c_e}"
model.load_state_dict(converted_unet_checkpoint)
return model
def superres_create_unet_diffusers_config(original_unet_config):
attention_resolutions = parse_list(original_unet_config["attention_resolutions"])
attention_resolutions = [original_unet_config["image_size"] // int(res) for res in attention_resolutions]
channel_mult = parse_list(original_unet_config["channel_mult"])
block_out_channels = [original_unet_config["model_channels"] * mult for mult in channel_mult]
down_block_types = []
resolution = 1
for i in range(len(block_out_channels)):
if resolution in attention_resolutions:
block_type = "SimpleCrossAttnDownBlock2D"
elif original_unet_config["resblock_updown"]:
block_type = "ResnetDownsampleBlock2D"
else:
block_type = "DownBlock2D"
down_block_types.append(block_type)
if i != len(block_out_channels) - 1:
resolution *= 2
up_block_types = []
for i in range(len(block_out_channels)):
if resolution in attention_resolutions:
block_type = "SimpleCrossAttnUpBlock2D"
elif original_unet_config["resblock_updown"]:
block_type = "ResnetUpsampleBlock2D"
else:
block_type = "UpBlock2D"
up_block_types.append(block_type)
resolution //= 2
head_dim = original_unet_config["num_head_channels"]
use_linear_projection = (
original_unet_config["use_linear_in_transformer"]
if "use_linear_in_transformer" in original_unet_config
else False
)
if use_linear_projection:
# stable diffusion 2-base-512 and 2-768
if head_dim is None:
head_dim = [5, 10, 20, 20]
class_embed_type = None
projection_class_embeddings_input_dim = None
if "num_classes" in original_unet_config:
if original_unet_config["num_classes"] == "sequential":
class_embed_type = "projection"
assert "adm_in_channels" in original_unet_config
projection_class_embeddings_input_dim = original_unet_config["adm_in_channels"]
else:
raise NotImplementedError(
f"Unknown conditional unet num_classes config: {original_unet_config['num_classes']}"
)
config = {
"in_channels": original_unet_config["in_channels"],
"down_block_types": tuple(down_block_types),
"block_out_channels": tuple(block_out_channels),
"layers_per_block": tuple(original_unet_config["num_res_blocks"]),
"cross_attention_dim": original_unet_config["encoder_channels"],
"attention_head_dim": head_dim,
"use_linear_projection": use_linear_projection,
"class_embed_type": class_embed_type,
"projection_class_embeddings_input_dim": projection_class_embeddings_input_dim,
"out_channels": original_unet_config["out_channels"],
"up_block_types": tuple(up_block_types),
"upcast_attention": False, # TODO: guessing
"cross_attention_norm": "group_norm",
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"act_fn": "gelu",
}
if original_unet_config["use_scale_shift_norm"]:
config["resnet_time_scale_shift"] = "scale_shift"
return config
def superres_convert_ldm_unet_checkpoint(unet_state_dict, config, path=None, extract_ema=False):
"""
Takes a state dict and a config, and returns a converted checkpoint.
"""
new_checkpoint = {}
new_checkpoint["time_embedding.linear_1.weight"] = unet_state_dict["time_embed.0.weight"]
new_checkpoint["time_embedding.linear_1.bias"] = unet_state_dict["time_embed.0.bias"]
new_checkpoint["time_embedding.linear_2.weight"] = unet_state_dict["time_embed.2.weight"]
new_checkpoint["time_embedding.linear_2.bias"] = unet_state_dict["time_embed.2.bias"]
if config["class_embed_type"] is None:
# No parameters to port
...
elif config["class_embed_type"] == "timestep" or config["class_embed_type"] == "projection":
new_checkpoint["class_embedding.linear_1.weight"] = unet_state_dict["aug_proj.0.weight"]
new_checkpoint["class_embedding.linear_1.bias"] = unet_state_dict["aug_proj.0.bias"]
new_checkpoint["class_embedding.linear_2.weight"] = unet_state_dict["aug_proj.2.weight"]
new_checkpoint["class_embedding.linear_2.bias"] = unet_state_dict["aug_proj.2.bias"]
else:
raise NotImplementedError(f"Not implemented `class_embed_type`: {config['class_embed_type']}")
if "encoder_proj.weight" in unet_state_dict:
new_checkpoint["encoder_hid_proj.weight"] = unet_state_dict["encoder_proj.weight"]
new_checkpoint["encoder_hid_proj.bias"] = unet_state_dict["encoder_proj.bias"]
if "encoder_pooling.0.weight" in unet_state_dict:
mapping = {
"encoder_pooling.0": "add_embedding.norm1",
"encoder_pooling.1": "add_embedding.pool",
"encoder_pooling.2": "add_embedding.proj",
"encoder_pooling.3": "add_embedding.norm2",
}
for key in unet_state_dict.keys():
if key.startswith("encoder_pooling"):
prefix = key[: len("encoder_pooling.0")]
new_key = key.replace(prefix, mapping[prefix])
new_checkpoint[new_key] = unet_state_dict[key]
new_checkpoint["conv_in.weight"] = unet_state_dict["input_blocks.0.0.weight"]
new_checkpoint["conv_in.bias"] = unet_state_dict["input_blocks.0.0.bias"]
new_checkpoint["conv_norm_out.weight"] = unet_state_dict["out.0.weight"]
new_checkpoint["conv_norm_out.bias"] = unet_state_dict["out.0.bias"]
new_checkpoint["conv_out.weight"] = unet_state_dict["out.2.weight"]
new_checkpoint["conv_out.bias"] = unet_state_dict["out.2.bias"]
# Retrieves the keys for the input blocks only
num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "input_blocks" in layer})
input_blocks = {
layer_id: [key for key in unet_state_dict if f"input_blocks.{layer_id}." in key]
for layer_id in range(num_input_blocks)
}
# Retrieves the keys for the middle blocks only
num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "middle_block" in layer})
middle_blocks = {
layer_id: [key for key in unet_state_dict if f"middle_block.{layer_id}" in key]
for layer_id in range(num_middle_blocks)
}
# Retrieves the keys for the output blocks only
num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "output_blocks" in layer})
output_blocks = {
layer_id: [key for key in unet_state_dict if f"output_blocks.{layer_id}." in key]
for layer_id in range(num_output_blocks)
}
if not isinstance(config["layers_per_block"], int):
layers_per_block_list = [e + 1 for e in config["layers_per_block"]]
layers_per_block_cumsum = list(np.cumsum(layers_per_block_list))
downsampler_ids = layers_per_block_cumsum
else:
# TODO need better check than i in [4, 8, 12, 16]
downsampler_ids = [4, 8, 12, 16]
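# In the non-int case, e.g. (hypothetical) layers_per_block=(2, 2, 3) -> layers_per_block_list=[3, 3, 4]
# -> downsampler_ids=[3, 6, 10]; an input block at one of these indices is treated as its level's
# downsampler (when the block type actually has one).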
for i in range(1, num_input_blocks):
if isinstance(config["layers_per_block"], int):
layers_per_block = config["layers_per_block"]
block_id = (i - 1) // (layers_per_block + 1)
layer_in_block_id = (i - 1) % (layers_per_block + 1)
else:
block_id = next(k for k, n in enumerate(layers_per_block_cumsum) if (i - 1) < n)
passed_blocks = layers_per_block_cumsum[block_id - 1] if block_id > 0 else 0
layer_in_block_id = (i - 1) - passed_blocks
resnets = [
key for key in input_blocks[i] if f"input_blocks.{i}.0" in key and f"input_blocks.{i}.0.op" not in key
]
attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]
if f"input_blocks.{i}.0.op.weight" in unet_state_dict:
new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = unet_state_dict.pop(
f"input_blocks.{i}.0.op.weight"
)
new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = unet_state_dict.pop(
f"input_blocks.{i}.0.op.bias"
)
paths = renew_resnet_paths(resnets)
block_type = config["down_block_types"][block_id]
if (
block_type == "ResnetDownsampleBlock2D" or block_type == "SimpleCrossAttnDownBlock2D"
) and i in downsampler_ids:
meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.downsamplers.0"}
else:
meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
assign_to_checkpoint(
paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
)
if len(attentions):
old_path = f"input_blocks.{i}.1"
new_path = f"down_blocks.{block_id}.attentions.{layer_in_block_id}"
assign_attention_to_checkpoint(
new_checkpoint=new_checkpoint,
unet_state_dict=unet_state_dict,
old_path=old_path,
new_path=new_path,
config=config,
)
paths = renew_attention_paths(attentions)
meta_path = {"old": old_path, "new": new_path}
assign_to_checkpoint(
paths,
new_checkpoint,
unet_state_dict,
additional_replacements=[meta_path],
config=config,
)
resnet_0 = middle_blocks[0]
attentions = middle_blocks[1]
resnet_1 = middle_blocks[2]
resnet_0_paths = renew_resnet_paths(resnet_0)
assign_to_checkpoint(resnet_0_paths, new_checkpoint, unet_state_dict, config=config)
resnet_1_paths = renew_resnet_paths(resnet_1)
assign_to_checkpoint(resnet_1_paths, new_checkpoint, unet_state_dict, config=config)
old_path = "middle_block.1"
new_path = "mid_block.attentions.0"
assign_attention_to_checkpoint(
new_checkpoint=new_checkpoint,
unet_state_dict=unet_state_dict,
old_path=old_path,
new_path=new_path,
config=config,
)
attentions_paths = renew_attention_paths(attentions)
meta_path = {"old": "middle_block.1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(
attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
)
if not isinstance(config["layers_per_block"], int):
layers_per_block_list = list(reversed([e + 1 for e in config["layers_per_block"]]))
layers_per_block_cumsum = list(np.cumsum(layers_per_block_list))
for i in range(num_output_blocks):
if isinstance(config["layers_per_block"], int):
layers_per_block = config["layers_per_block"]
block_id = i // (layers_per_block + 1)
layer_in_block_id = i % (layers_per_block + 1)
else:
block_id = next(k for k, n in enumerate(layers_per_block_cumsum) if i < n)
passed_blocks = layers_per_block_cumsum[block_id - 1] if block_id > 0 else 0
layer_in_block_id = i - passed_blocks
output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
output_block_list = {}
for layer in output_block_layers:
layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
if layer_id in output_block_list:
output_block_list[layer_id].append(layer_name)
else:
output_block_list[layer_id] = [layer_name]
# len(output_block_list) == 1 -> resnet
# len(output_block_list) == 2 -> resnet, attention or resnet, upscale resnet
# len(output_block_list) == 3 -> resnet, attention, upscale resnet
if len(output_block_list) > 1:
resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
has_attention = True
if len(output_block_list) == 2 and any("in_layers" in k for k in output_block_list["1"]):
has_attention = False
maybe_attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]
paths = renew_resnet_paths(resnets)
meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
assign_to_checkpoint(
paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
)
output_block_list = {k: sorted(v) for k, v in output_block_list.items()}
if ["conv.bias", "conv.weight"] in output_block_list.values():
index = list(output_block_list.values()).index(["conv.bias", "conv.weight"])
new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = unet_state_dict[
f"output_blocks.{i}.{index}.conv.weight"
]
new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = unet_state_dict[
f"output_blocks.{i}.{index}.conv.bias"
]
# this entry is a plain upsampler conv, not an attention block
has_attention = False
maybe_attentions = []
if has_attention:
old_path = f"output_blocks.{i}.1"
new_path = f"up_blocks.{block_id}.attentions.{layer_in_block_id}"
assign_attention_to_checkpoint(
new_checkpoint=new_checkpoint,
unet_state_dict=unet_state_dict,
old_path=old_path,
new_path=new_path,
config=config,
)
paths = renew_attention_paths(maybe_attentions)
meta_path = {
"old": old_path,
"new": new_path,
}
assign_to_checkpoint(
paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
)
if len(output_block_list) == 3 or (not has_attention and len(maybe_attentions) > 0):
layer_id = len(output_block_list) - 1
resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.{layer_id}" in key]
paths = renew_resnet_paths(resnets)
meta_path = {"old": f"output_blocks.{i}.{layer_id}", "new": f"up_blocks.{block_id}.upsamplers.0"}
assign_to_checkpoint(
paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
)
else:
resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
for path in resnet_0_paths:
old_path = ".".join(["output_blocks", str(i), path["old"]])
new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])
new_checkpoint[new_path] = unet_state_dict[old_path]
return new_checkpoint
def verify_param_count(orig_path, unet_diffusers_config):
if "-II-" in orig_path:
from deepfloyd_if.modules import IFStageII
if_II = IFStageII(device="cpu", dir_or_name=orig_path)
elif "-III-" in orig_path:
from deepfloyd_if.modules import IFStageIII
if_II = IFStageIII(device="cpu", dir_or_name=orig_path)
else:
assert f"Weird name. Should have -II- or -III- in path: {orig_path}"
unet = UNet2DConditionModel(**unet_diffusers_config)
# in params
assert_param_count(unet.time_embedding, if_II.model.time_embed)
assert_param_count(unet.conv_in, if_II.model.input_blocks[:1])
# downblocks
assert_param_count(unet.down_blocks[0], if_II.model.input_blocks[1:4])
assert_param_count(unet.down_blocks[1], if_II.model.input_blocks[4:7])
assert_param_count(unet.down_blocks[2], if_II.model.input_blocks[7:11])
if "-II-" in orig_path:
assert_param_count(unet.down_blocks[3], if_II.model.input_blocks[11:17])
assert_param_count(unet.down_blocks[4], if_II.model.input_blocks[17:])
if "-III-" in orig_path:
assert_param_count(unet.down_blocks[3], if_II.model.input_blocks[11:15])
assert_param_count(unet.down_blocks[4], if_II.model.input_blocks[15:20])
assert_param_count(unet.down_blocks[5], if_II.model.input_blocks[20:])
# mid block
assert_param_count(unet.mid_block, if_II.model.middle_block)
# up block
if "-II-" in orig_path:
assert_param_count(unet.up_blocks[0], if_II.model.output_blocks[:6])
assert_param_count(unet.up_blocks[1], if_II.model.output_blocks[6:12])
assert_param_count(unet.up_blocks[2], if_II.model.output_blocks[12:16])
assert_param_count(unet.up_blocks[3], if_II.model.output_blocks[16:19])
assert_param_count(unet.up_blocks[4], if_II.model.output_blocks[19:])
if "-III-" in orig_path:
assert_param_count(unet.up_blocks[0], if_II.model.output_blocks[:5])
assert_param_count(unet.up_blocks[1], if_II.model.output_blocks[5:10])
assert_param_count(unet.up_blocks[2], if_II.model.output_blocks[10:14])
assert_param_count(unet.up_blocks[3], if_II.model.output_blocks[14:18])
assert_param_count(unet.up_blocks[4], if_II.model.output_blocks[18:21])
assert_param_count(unet.up_blocks[5], if_II.model.output_blocks[21:24])
# out params
assert_param_count(unet.conv_norm_out, if_II.model.out[0])
assert_param_count(unet.conv_out, if_II.model.out[2])
# make sure all model architecture has same param count
assert_param_count(unet, if_II.model)
def assert_param_count(model_1, model_2):
count_1 = sum(p.numel() for p in model_1.parameters())
count_2 = sum(p.numel() for p in model_2.parameters())
assert count_1 == count_2, f"{model_1.__class__}: {count_1} != {model_2.__class__}: {count_2}"
def superres_check_against_original(dump_path, unet_checkpoint_path):
model_path = dump_path
model = UNet2DConditionModel.from_pretrained(model_path)
model.to("cuda")
orig_path = unet_checkpoint_path
if "-II-" in orig_path:
from deepfloyd_if.modules import IFStageII
if_II_model = IFStageII(device="cuda", dir_or_name=orig_path, model_kwargs={"precision": "fp32"}).model
elif "-III-" in orig_path:
from deepfloyd_if.modules import IFStageIII
if_II_model = IFStageIII(device="cuda", dir_or_name=orig_path, model_kwargs={"precision": "fp32"}).model
batch_size = 1
channels = model.config.in_channels // 2
height = model.config.sample_size
width = model.config.sample_size
height = 1024
width = 1024
torch.manual_seed(0)
latents = torch.randn((batch_size, channels, height, width), device=model.device)
image_small = torch.randn((batch_size, channels, height // 4, width // 4), device=model.device)
interpolate_antialias = {}
if "antialias" in inspect.signature(F.interpolate).parameters:
interpolate_antialias["antialias"] = True
image_upscaled = F.interpolate(
image_small, size=[height, width], mode="bicubic", align_corners=False, **interpolate_antialias
)
latent_model_input = torch.cat([latents, image_upscaled], dim=1).to(model.dtype)
t = torch.tensor([5], device=model.device).to(model.dtype)
seq_len = 64
encoder_hidden_states = torch.randn((batch_size, seq_len, model.config.encoder_hid_dim), device=model.device).to(
model.dtype
)
fake_class_labels = torch.tensor([t], device=model.device).to(model.dtype)
with torch.no_grad():
out = if_II_model(latent_model_input, t, aug_steps=fake_class_labels, text_emb=encoder_hidden_states)
if_II_model.to("cpu")
del if_II_model
import gc
torch.cuda.empty_cache()
gc.collect()
print(50 * "=")
with torch.no_grad():
noise_pred = model(
sample=latent_model_input,
encoder_hidden_states=encoder_hidden_states,
class_labels=fake_class_labels,
timestep=t,
).sample
print("Out shape", noise_pred.shape)
print("Diff", (out - noise_pred).abs().sum())
if __name__ == "__main__":
main(parse_args())
| diffusers/scripts/convert_if.py/0 | {
"file_path": "diffusers/scripts/convert_if.py",
"repo_id": "diffusers",
"token_count": 23054
} | 120 |
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Conversion script for the T2I-Adapter checkpoints.
"""
import argparse
import torch
from diffusers import T2IAdapter
def convert_adapter(src_state, in_channels):
original_body_length = max([int(x.split(".")[1]) for x in src_state.keys() if "body." in x]) + 1
assert original_body_length == 8
# (0, 1) -> channels 1
assert src_state["body.0.block1.weight"].shape == (320, 320, 3, 3)
# (2, 3) -> channels 2
assert src_state["body.2.in_conv.weight"].shape == (640, 320, 1, 1)
# (4, 5) -> channels 3
assert src_state["body.4.in_conv.weight"].shape == (1280, 640, 1, 1)
# (6, 7) -> channels 4
assert src_state["body.6.block1.weight"].shape == (1280, 1280, 3, 3)
res_state = {
"adapter.conv_in.weight": src_state.pop("conv_in.weight"),
"adapter.conv_in.bias": src_state.pop("conv_in.bias"),
# 0.resnets.0
"adapter.body.0.resnets.0.block1.weight": src_state.pop("body.0.block1.weight"),
"adapter.body.0.resnets.0.block1.bias": src_state.pop("body.0.block1.bias"),
"adapter.body.0.resnets.0.block2.weight": src_state.pop("body.0.block2.weight"),
"adapter.body.0.resnets.0.block2.bias": src_state.pop("body.0.block2.bias"),
# 0.resnets.1
"adapter.body.0.resnets.1.block1.weight": src_state.pop("body.1.block1.weight"),
"adapter.body.0.resnets.1.block1.bias": src_state.pop("body.1.block1.bias"),
"adapter.body.0.resnets.1.block2.weight": src_state.pop("body.1.block2.weight"),
"adapter.body.0.resnets.1.block2.bias": src_state.pop("body.1.block2.bias"),
# 1
"adapter.body.1.in_conv.weight": src_state.pop("body.2.in_conv.weight"),
"adapter.body.1.in_conv.bias": src_state.pop("body.2.in_conv.bias"),
# 1.resnets.0
"adapter.body.1.resnets.0.block1.weight": src_state.pop("body.2.block1.weight"),
"adapter.body.1.resnets.0.block1.bias": src_state.pop("body.2.block1.bias"),
"adapter.body.1.resnets.0.block2.weight": src_state.pop("body.2.block2.weight"),
"adapter.body.1.resnets.0.block2.bias": src_state.pop("body.2.block2.bias"),
# 1.resnets.1
"adapter.body.1.resnets.1.block1.weight": src_state.pop("body.3.block1.weight"),
"adapter.body.1.resnets.1.block1.bias": src_state.pop("body.3.block1.bias"),
"adapter.body.1.resnets.1.block2.weight": src_state.pop("body.3.block2.weight"),
"adapter.body.1.resnets.1.block2.bias": src_state.pop("body.3.block2.bias"),
# 2
"adapter.body.2.in_conv.weight": src_state.pop("body.4.in_conv.weight"),
"adapter.body.2.in_conv.bias": src_state.pop("body.4.in_conv.bias"),
# 2.resnets.0
"adapter.body.2.resnets.0.block1.weight": src_state.pop("body.4.block1.weight"),
"adapter.body.2.resnets.0.block1.bias": src_state.pop("body.4.block1.bias"),
"adapter.body.2.resnets.0.block2.weight": src_state.pop("body.4.block2.weight"),
"adapter.body.2.resnets.0.block2.bias": src_state.pop("body.4.block2.bias"),
# 2.resnets.1
"adapter.body.2.resnets.1.block1.weight": src_state.pop("body.5.block1.weight"),
"adapter.body.2.resnets.1.block1.bias": src_state.pop("body.5.block1.bias"),
"adapter.body.2.resnets.1.block2.weight": src_state.pop("body.5.block2.weight"),
"adapter.body.2.resnets.1.block2.bias": src_state.pop("body.5.block2.bias"),
# 3.resnets.0
"adapter.body.3.resnets.0.block1.weight": src_state.pop("body.6.block1.weight"),
"adapter.body.3.resnets.0.block1.bias": src_state.pop("body.6.block1.bias"),
"adapter.body.3.resnets.0.block2.weight": src_state.pop("body.6.block2.weight"),
"adapter.body.3.resnets.0.block2.bias": src_state.pop("body.6.block2.bias"),
# 3.resnets.1
"adapter.body.3.resnets.1.block1.weight": src_state.pop("body.7.block1.weight"),
"adapter.body.3.resnets.1.block1.bias": src_state.pop("body.7.block1.bias"),
"adapter.body.3.resnets.1.block2.weight": src_state.pop("body.7.block2.weight"),
"adapter.body.3.resnets.1.block2.bias": src_state.pop("body.7.block2.bias"),
}
assert len(src_state) == 0
adapter = T2IAdapter(in_channels=in_channels, adapter_type="full_adapter")
adapter.load_state_dict(res_state)
return adapter
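# The explicit mapping above can also be derived programmatically. This is a sketch only
# (the dict above stays the source of truth); it relies on the observed pattern that source
# blocks 2k and 2k+1 become diffusers body k, with an extra in_conv at source blocks 2 and 4.
def _full_adapter_key_map():
    mapping = {"adapter.conv_in.weight": "conv_in.weight", "adapter.conv_in.bias": "conv_in.bias"}
    for src_idx in range(8):
        dst_block, dst_resnet = src_idx // 2, src_idx % 2
        if src_idx in (2, 4):  # channel-changing blocks start with an in_conv
            for p in ("weight", "bias"):
                mapping[f"adapter.body.{dst_block}.in_conv.{p}"] = f"body.{src_idx}.in_conv.{p}"
        for conv in ("block1", "block2"):
            for p in ("weight", "bias"):
                mapping[f"adapter.body.{dst_block}.resnets.{dst_resnet}.{conv}.{p}"] = f"body.{src_idx}.{conv}.{p}"
    return mapping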
def convert_light_adapter(src_state):
original_body_length = max([int(x.split(".")[1]) for x in src_state.keys() if "body." in x]) + 1
assert original_body_length == 4
res_state = {
# body.0.in_conv
"adapter.body.0.in_conv.weight": src_state.pop("body.0.in_conv.weight"),
"adapter.body.0.in_conv.bias": src_state.pop("body.0.in_conv.bias"),
# body.0.resnets.0
"adapter.body.0.resnets.0.block1.weight": src_state.pop("body.0.body.0.block1.weight"),
"adapter.body.0.resnets.0.block1.bias": src_state.pop("body.0.body.0.block1.bias"),
"adapter.body.0.resnets.0.block2.weight": src_state.pop("body.0.body.0.block2.weight"),
"adapter.body.0.resnets.0.block2.bias": src_state.pop("body.0.body.0.block2.bias"),
# body.0.resnets.1
"adapter.body.0.resnets.1.block1.weight": src_state.pop("body.0.body.1.block1.weight"),
"adapter.body.0.resnets.1.block1.bias": src_state.pop("body.0.body.1.block1.bias"),
"adapter.body.0.resnets.1.block2.weight": src_state.pop("body.0.body.1.block2.weight"),
"adapter.body.0.resnets.1.block2.bias": src_state.pop("body.0.body.1.block2.bias"),
# body.0.resnets.2
"adapter.body.0.resnets.2.block1.weight": src_state.pop("body.0.body.2.block1.weight"),
"adapter.body.0.resnets.2.block1.bias": src_state.pop("body.0.body.2.block1.bias"),
"adapter.body.0.resnets.2.block2.weight": src_state.pop("body.0.body.2.block2.weight"),
"adapter.body.0.resnets.2.block2.bias": src_state.pop("body.0.body.2.block2.bias"),
# body.0.resnets.3
"adapter.body.0.resnets.3.block1.weight": src_state.pop("body.0.body.3.block1.weight"),
"adapter.body.0.resnets.3.block1.bias": src_state.pop("body.0.body.3.block1.bias"),
"adapter.body.0.resnets.3.block2.weight": src_state.pop("body.0.body.3.block2.weight"),
"adapter.body.0.resnets.3.block2.bias": src_state.pop("body.0.body.3.block2.bias"),
# body.0.out_conv
"adapter.body.0.out_conv.weight": src_state.pop("body.0.out_conv.weight"),
"adapter.body.0.out_conv.bias": src_state.pop("body.0.out_conv.bias"),
# body.1.in_conv
"adapter.body.1.in_conv.weight": src_state.pop("body.1.in_conv.weight"),
"adapter.body.1.in_conv.bias": src_state.pop("body.1.in_conv.bias"),
# body.1.resnets.0
"adapter.body.1.resnets.0.block1.weight": src_state.pop("body.1.body.0.block1.weight"),
"adapter.body.1.resnets.0.block1.bias": src_state.pop("body.1.body.0.block1.bias"),
"adapter.body.1.resnets.0.block2.weight": src_state.pop("body.1.body.0.block2.weight"),
"adapter.body.1.resnets.0.block2.bias": src_state.pop("body.1.body.0.block2.bias"),
# body.1.resnets.1
"adapter.body.1.resnets.1.block1.weight": src_state.pop("body.1.body.1.block1.weight"),
"adapter.body.1.resnets.1.block1.bias": src_state.pop("body.1.body.1.block1.bias"),
"adapter.body.1.resnets.1.block2.weight": src_state.pop("body.1.body.1.block2.weight"),
"adapter.body.1.resnets.1.block2.bias": src_state.pop("body.1.body.1.block2.bias"),
# body.1.body.2
"adapter.body.1.resnets.2.block1.weight": src_state.pop("body.1.body.2.block1.weight"),
"adapter.body.1.resnets.2.block1.bias": src_state.pop("body.1.body.2.block1.bias"),
"adapter.body.1.resnets.2.block2.weight": src_state.pop("body.1.body.2.block2.weight"),
"adapter.body.1.resnets.2.block2.bias": src_state.pop("body.1.body.2.block2.bias"),
# body.1.body.3
"adapter.body.1.resnets.3.block1.weight": src_state.pop("body.1.body.3.block1.weight"),
"adapter.body.1.resnets.3.block1.bias": src_state.pop("body.1.body.3.block1.bias"),
"adapter.body.1.resnets.3.block2.weight": src_state.pop("body.1.body.3.block2.weight"),
"adapter.body.1.resnets.3.block2.bias": src_state.pop("body.1.body.3.block2.bias"),
# body.1.out_conv
"adapter.body.1.out_conv.weight": src_state.pop("body.1.out_conv.weight"),
"adapter.body.1.out_conv.bias": src_state.pop("body.1.out_conv.bias"),
# body.2.in_conv
"adapter.body.2.in_conv.weight": src_state.pop("body.2.in_conv.weight"),
"adapter.body.2.in_conv.bias": src_state.pop("body.2.in_conv.bias"),
# body.2.body.0
"adapter.body.2.resnets.0.block1.weight": src_state.pop("body.2.body.0.block1.weight"),
"adapter.body.2.resnets.0.block1.bias": src_state.pop("body.2.body.0.block1.bias"),
"adapter.body.2.resnets.0.block2.weight": src_state.pop("body.2.body.0.block2.weight"),
"adapter.body.2.resnets.0.block2.bias": src_state.pop("body.2.body.0.block2.bias"),
# body.2.body.1
"adapter.body.2.resnets.1.block1.weight": src_state.pop("body.2.body.1.block1.weight"),
"adapter.body.2.resnets.1.block1.bias": src_state.pop("body.2.body.1.block1.bias"),
"adapter.body.2.resnets.1.block2.weight": src_state.pop("body.2.body.1.block2.weight"),
"adapter.body.2.resnets.1.block2.bias": src_state.pop("body.2.body.1.block2.bias"),
# body.2.body.2
"adapter.body.2.resnets.2.block1.weight": src_state.pop("body.2.body.2.block1.weight"),
"adapter.body.2.resnets.2.block1.bias": src_state.pop("body.2.body.2.block1.bias"),
"adapter.body.2.resnets.2.block2.weight": src_state.pop("body.2.body.2.block2.weight"),
"adapter.body.2.resnets.2.block2.bias": src_state.pop("body.2.body.2.block2.bias"),
# body.2.body.3
"adapter.body.2.resnets.3.block1.weight": src_state.pop("body.2.body.3.block1.weight"),
"adapter.body.2.resnets.3.block1.bias": src_state.pop("body.2.body.3.block1.bias"),
"adapter.body.2.resnets.3.block2.weight": src_state.pop("body.2.body.3.block2.weight"),
"adapter.body.2.resnets.3.block2.bias": src_state.pop("body.2.body.3.block2.bias"),
# body.2.out_conv
"adapter.body.2.out_conv.weight": src_state.pop("body.2.out_conv.weight"),
"adapter.body.2.out_conv.bias": src_state.pop("body.2.out_conv.bias"),
# body.3.in_conv
"adapter.body.3.in_conv.weight": src_state.pop("body.3.in_conv.weight"),
"adapter.body.3.in_conv.bias": src_state.pop("body.3.in_conv.bias"),
# body.3.body.0
"adapter.body.3.resnets.0.block1.weight": src_state.pop("body.3.body.0.block1.weight"),
"adapter.body.3.resnets.0.block1.bias": src_state.pop("body.3.body.0.block1.bias"),
"adapter.body.3.resnets.0.block2.weight": src_state.pop("body.3.body.0.block2.weight"),
"adapter.body.3.resnets.0.block2.bias": src_state.pop("body.3.body.0.block2.bias"),
# body.3.body.1
"adapter.body.3.resnets.1.block1.weight": src_state.pop("body.3.body.1.block1.weight"),
"adapter.body.3.resnets.1.block1.bias": src_state.pop("body.3.body.1.block1.bias"),
"adapter.body.3.resnets.1.block2.weight": src_state.pop("body.3.body.1.block2.weight"),
"adapter.body.3.resnets.1.block2.bias": src_state.pop("body.3.body.1.block2.bias"),
# body.3.body.2
"adapter.body.3.resnets.2.block1.weight": src_state.pop("body.3.body.2.block1.weight"),
"adapter.body.3.resnets.2.block1.bias": src_state.pop("body.3.body.2.block1.bias"),
"adapter.body.3.resnets.2.block2.weight": src_state.pop("body.3.body.2.block2.weight"),
"adapter.body.3.resnets.2.block2.bias": src_state.pop("body.3.body.2.block2.bias"),
# body.3.body.3
"adapter.body.3.resnets.3.block1.weight": src_state.pop("body.3.body.3.block1.weight"),
"adapter.body.3.resnets.3.block1.bias": src_state.pop("body.3.body.3.block1.bias"),
"adapter.body.3.resnets.3.block2.weight": src_state.pop("body.3.body.3.block2.weight"),
"adapter.body.3.resnets.3.block2.bias": src_state.pop("body.3.body.3.block2.bias"),
# body.3.out_conv
"adapter.body.3.out_conv.weight": src_state.pop("body.3.out_conv.weight"),
"adapter.body.3.out_conv.bias": src_state.pop("body.3.out_conv.bias"),
}
assert len(src_state) == 0
adapter = T2IAdapter(in_channels=3, channels=[320, 640, 1280], num_res_blocks=4, adapter_type="light_adapter")
adapter.load_state_dict(res_state)
return adapter
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument(
"--output_path", default=None, type=str, required=True, help="Path to the store the result checkpoint."
)
parser.add_argument(
"--is_adapter_light",
action="store_true",
help="Is checkpoint come from Adapter-Light architecture. ex: color-adapter",
)
parser.add_argument("--in_channels", required=False, type=int, help="Input channels for non-light adapter")
args = parser.parse_args()
src_state = torch.load(args.checkpoint_path)
if args.is_adapter_light:
adapter = convert_light_adapter(src_state)
else:
if args.in_channels is None:
raise ValueError("set `--in_channels=<n>`")
adapter = convert_adapter(src_state, args.in_channels)
adapter.save_pretrained(args.output_path)
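# Example invocations (the checkpoint and output paths below are hypothetical):
#   python convert_original_t2i_adapter.py --checkpoint_path ./t2iadapter_depth.pth \
#       --output_path ./t2i-adapter-depth --in_channels 3
#   python convert_original_t2i_adapter.py --checkpoint_path ./t2iadapter_color.pth \
#       --output_path ./t2i-adapter-color --is_adapter_light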
| diffusers/scripts/convert_original_t2i_adapter.py/0 | {
"file_path": "diffusers/scripts/convert_original_t2i_adapter.py",
"repo_id": "diffusers",
"token_count": 6734
} | 121 |
# Run inside root directory of official source code: https://github.com/dome272/wuerstchen/
import os
import torch
from transformers import AutoTokenizer, CLIPTextModel
from vqgan import VQModel
from diffusers import (
DDPMWuerstchenScheduler,
WuerstchenCombinedPipeline,
WuerstchenDecoderPipeline,
WuerstchenPriorPipeline,
)
from diffusers.pipelines.wuerstchen import PaellaVQModel, WuerstchenDiffNeXt, WuerstchenPrior
model_path = "models/"
device = "cpu"
paella_vqmodel = VQModel()
state_dict = torch.load(os.path.join(model_path, "vqgan_f4_v1_500k.pt"), map_location=device)["state_dict"]
paella_vqmodel.load_state_dict(state_dict)
state_dict["vquantizer.embedding.weight"] = state_dict["vquantizer.codebook.weight"]
state_dict.pop("vquantizer.codebook.weight")
vqmodel = PaellaVQModel(num_vq_embeddings=paella_vqmodel.codebook_size, latent_channels=paella_vqmodel.c_latent)
vqmodel.load_state_dict(state_dict)
# Clip Text encoder and tokenizer
text_encoder = CLIPTextModel.from_pretrained("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k")
tokenizer = AutoTokenizer.from_pretrained("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k")
# Generator
gen_text_encoder = CLIPTextModel.from_pretrained("laion/CLIP-ViT-H-14-laion2B-s32B-b79K").to("cpu")
gen_tokenizer = AutoTokenizer.from_pretrained("laion/CLIP-ViT-H-14-laion2B-s32B-b79K")
orig_state_dict = torch.load(os.path.join(model_path, "model_v2_stage_b.pt"), map_location=device)["state_dict"]
state_dict = {}
for key in orig_state_dict.keys():
if key.endswith("in_proj_weight"):
weights = orig_state_dict[key].chunk(3, 0)
state_dict[key.replace("attn.in_proj_weight", "to_q.weight")] = weights[0]
state_dict[key.replace("attn.in_proj_weight", "to_k.weight")] = weights[1]
state_dict[key.replace("attn.in_proj_weight", "to_v.weight")] = weights[2]
elif key.endswith("in_proj_bias"):
weights = orig_state_dict[key].chunk(3, 0)
state_dict[key.replace("attn.in_proj_bias", "to_q.bias")] = weights[0]
state_dict[key.replace("attn.in_proj_bias", "to_k.bias")] = weights[1]
state_dict[key.replace("attn.in_proj_bias", "to_v.bias")] = weights[2]
elif key.endswith("out_proj.weight"):
weights = orig_state_dict[key]
state_dict[key.replace("attn.out_proj.weight", "to_out.0.weight")] = weights
elif key.endswith("out_proj.bias"):
weights = orig_state_dict[key]
state_dict[key.replace("attn.out_proj.bias", "to_out.0.bias")] = weights
else:
state_dict[key] = orig_state_dict[key]
decoder = WuerstchenDiffNeXt()
decoder.load_state_dict(state_dict)
# Prior
orig_state_dict = torch.load(os.path.join(model_path, "model_v3_stage_c.pt"), map_location=device)["ema_state_dict"]
state_dict = {}
for key in orig_state_dict.keys():
if key.endswith("in_proj_weight"):
weights = orig_state_dict[key].chunk(3, 0)
state_dict[key.replace("attn.in_proj_weight", "to_q.weight")] = weights[0]
state_dict[key.replace("attn.in_proj_weight", "to_k.weight")] = weights[1]
state_dict[key.replace("attn.in_proj_weight", "to_v.weight")] = weights[2]
elif key.endswith("in_proj_bias"):
weights = orig_state_dict[key].chunk(3, 0)
state_dict[key.replace("attn.in_proj_bias", "to_q.bias")] = weights[0]
state_dict[key.replace("attn.in_proj_bias", "to_k.bias")] = weights[1]
state_dict[key.replace("attn.in_proj_bias", "to_v.bias")] = weights[2]
elif key.endswith("out_proj.weight"):
weights = orig_state_dict[key]
state_dict[key.replace("attn.out_proj.weight", "to_out.0.weight")] = weights
elif key.endswith("out_proj.bias"):
weights = orig_state_dict[key]
state_dict[key.replace("attn.out_proj.bias", "to_out.0.bias")] = weights
else:
state_dict[key] = orig_state_dict[key]
prior_model = WuerstchenPrior(c_in=16, c=1536, c_cond=1280, c_r=64, depth=32, nhead=24).to(device)
prior_model.load_state_dict(state_dict)
# scheduler
scheduler = DDPMWuerstchenScheduler()
# Prior pipeline
prior_pipeline = WuerstchenPriorPipeline(
prior=prior_model, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler
)
prior_pipeline.save_pretrained("warp-ai/wuerstchen-prior")
decoder_pipeline = WuerstchenDecoderPipeline(
text_encoder=gen_text_encoder, tokenizer=gen_tokenizer, vqgan=vqmodel, decoder=decoder, scheduler=scheduler
)
decoder_pipeline.save_pretrained("warp-ai/wuerstchen")
# Wuerstchen pipeline
wuerstchen_pipeline = WuerstchenCombinedPipeline(
# Decoder
text_encoder=gen_text_encoder,
tokenizer=gen_tokenizer,
decoder=decoder,
scheduler=scheduler,
vqgan=vqmodel,
# Prior
prior_tokenizer=tokenizer,
prior_text_encoder=text_encoder,
prior=prior_model,
prior_scheduler=scheduler,
)
wuerstchen_pipeline.save_pretrained("warp-ai/WuerstchenCombinedPipeline")
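# The two renaming loops above apply the same rule for converting torch-style fused attention
# projections to diffusers-style per-projection keys. A reusable sketch of that rule (assuming
# q, k and v are packed row-wise in `in_proj_weight` / `in_proj_bias`, as chunked above):
def _split_in_proj(key, tensor):
    suffix = "weight" if key.endswith("weight") else "bias"
    q, k, v = tensor.chunk(3, 0)
    return {
        key.replace(f"attn.in_proj_{suffix}", f"to_q.{suffix}"): q,
        key.replace(f"attn.in_proj_{suffix}", f"to_k.{suffix}"): k,
        key.replace(f"attn.in_proj_{suffix}", f"to_v.{suffix}"): v,
    }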
| diffusers/scripts/convert_wuerstchen.py/0 | {
"file_path": "diffusers/scripts/convert_wuerstchen.py",
"repo_id": "diffusers",
"token_count": 2171
} | 122 |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
import tqdm
from ...models.unets.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils.dummy_pt_objects import DDPMScheduler
from ...utils.torch_utils import randn_tensor
class ValueGuidedRLPipeline(DiffusionPipeline):
r"""
Pipeline for value-guided sampling from a diffusion model trained to predict sequences of states.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
Parameters:
value_function ([`UNet1DModel`]):
A specialized UNet for fine-tuning trajectories based on reward.
unet ([`UNet1DModel`]):
UNet architecture to denoise the encoded trajectories.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded trajectories. Default for this
application is [`DDPMScheduler`].
env ():
An environment following the OpenAI gym API to act in. For now only Hopper has pretrained models.
"""
def __init__(
self,
value_function: UNet1DModel,
unet: UNet1DModel,
scheduler: DDPMScheduler,
env,
):
super().__init__()
self.register_modules(value_function=value_function, unet=unet, scheduler=scheduler, env=env)
self.data = env.get_dataset()
self.means = {}
for key in self.data.keys():
try:
self.means[key] = self.data[key].mean()
except: # noqa: E722
pass
self.stds = {}
for key in self.data.keys():
try:
self.stds[key] = self.data[key].std()
except: # noqa: E722
pass
self.state_dim = env.observation_space.shape[0]
self.action_dim = env.action_space.shape[0]
def normalize(self, x_in, key):
return (x_in - self.means[key]) / self.stds[key]
def de_normalize(self, x_in, key):
return x_in * self.stds[key] + self.means[key]
def to_torch(self, x_in):
if isinstance(x_in, dict):
return {k: self.to_torch(v) for k, v in x_in.items()}
elif torch.is_tensor(x_in):
return x_in.to(self.unet.device)
return torch.tensor(x_in, device=self.unet.device)
def reset_x0(self, x_in, cond, act_dim):
for key, val in cond.items():
x_in[:, key, act_dim:] = val.clone()
return x_in
def run_diffusion(self, x, conditions, n_guide_steps, scale):
batch_size = x.shape[0]
y = None
for i in tqdm.tqdm(self.scheduler.timesteps):
# create batch of timesteps to pass into model
timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
for _ in range(n_guide_steps):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
y = self.value_function(x.permute(0, 2, 1), timesteps).sample
grad = torch.autograd.grad([y.sum()], [x])[0]
posterior_variance = self.scheduler._get_variance(i)
model_std = torch.exp(0.5 * posterior_variance)
grad = model_std * grad
grad[timesteps < 2] = 0
x = x.detach()
x = x + scale * grad
x = self.reset_x0(x, conditions, self.action_dim)
prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)
# TODO: verify deprecation of this kwarg
x = self.scheduler.step(prev_x, i, x)["prev_sample"]
# apply conditions to the trajectory (set the initial state)
x = self.reset_x0(x, conditions, self.action_dim)
x = self.to_torch(x)
return x, y
def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
# normalize the observations and create batch dimension
obs = self.normalize(obs, "observations")
obs = obs[None].repeat(batch_size, axis=0)
conditions = {0: self.to_torch(obs)}
shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
x1 = randn_tensor(shape, device=self.unet.device)
x = self.reset_x0(x1, conditions, self.action_dim)
x = self.to_torch(x)
# run the diffusion process
x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)
# sort output trajectories by value
sorted_idx = y.argsort(0, descending=True).squeeze()
sorted_values = x[sorted_idx]
actions = sorted_values[:, :, : self.action_dim]
actions = actions.detach().cpu().numpy()
denorm_actions = self.de_normalize(actions, key="actions")
# select the action with the highest value
if y is not None:
selected_index = 0
else:
# if we didn't run value guiding, select a random action
selected_index = np.random.randint(0, batch_size)
denorm_actions = denorm_actions[selected_index, 0]
return denorm_actions
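# Hedged usage sketch (not part of the module): requires a D4RL-style environment and a
# pretrained checkpoint; the repo id below is the one used in the diffusers RL example and
# should be treated as an assumption here.
def _value_guided_example(env, obs):
    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32", env=env
    )
    # returns a single denormalized action for the current observation
    return pipeline(obs, planning_horizon=32, n_guide_steps=2, scale=0.1)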
| diffusers/src/diffusers/experimental/rl/value_guided_sampling.py/0 | {
"file_path": "diffusers/src/diffusers/experimental/rl/value_guided_sampling.py",
"repo_id": "diffusers",
"token_count": 2639
} | 123 |
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import deprecate
ACTIVATION_FUNCTIONS = {
"swish": nn.SiLU(),
"silu": nn.SiLU(),
"mish": nn.Mish(),
"gelu": nn.GELU(),
"relu": nn.ReLU(),
}
def get_activation(act_fn: str) -> nn.Module:
"""Helper function to get activation function from string.
Args:
act_fn (str): Name of activation function.
Returns:
nn.Module: Activation function.
"""
act_fn = act_fn.lower()
if act_fn in ACTIVATION_FUNCTIONS:
return ACTIVATION_FUNCTIONS[act_fn]
else:
raise ValueError(f"Unsupported activation function: {act_fn}")
class GELU(nn.Module):
r"""
GELU activation function with tanh approximation support with `approximate="tanh"`.
Parameters:
dim_in (`int`): The number of channels in the input.
dim_out (`int`): The number of channels in the output.
approximate (`str`, *optional*, defaults to `"none"`): If `"tanh"`, use tanh approximation.
bias (`bool`, defaults to True): Whether to use a bias in the linear layer.
"""
def __init__(self, dim_in: int, dim_out: int, approximate: str = "none", bias: bool = True):
super().__init__()
self.proj = nn.Linear(dim_in, dim_out, bias=bias)
self.approximate = approximate
def gelu(self, gate: torch.Tensor) -> torch.Tensor:
if gate.device.type != "mps":
return F.gelu(gate, approximate=self.approximate)
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)
def forward(self, hidden_states):
hidden_states = self.proj(hidden_states)
hidden_states = self.gelu(hidden_states)
return hidden_states
class GEGLU(nn.Module):
r"""
A [variant](https://arxiv.org/abs/2002.05202) of the gated linear unit activation function.
Parameters:
dim_in (`int`): The number of channels in the input.
dim_out (`int`): The number of channels in the output.
bias (`bool`, defaults to True): Whether to use a bias in the linear layer.
"""
def __init__(self, dim_in: int, dim_out: int, bias: bool = True):
super().__init__()
self.proj = nn.Linear(dim_in, dim_out * 2, bias=bias)
def gelu(self, gate: torch.Tensor) -> torch.Tensor:
if gate.device.type != "mps":
return F.gelu(gate)
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)
def forward(self, hidden_states, *args, **kwargs):
if len(args) > 0 or kwargs.get("scale", None) is not None:
deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`."
deprecate("scale", "1.0.0", deprecation_message)
hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
return hidden_states * self.gelu(gate)
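# Shape sketch for the gated variant above (illustrative only): the projection widens to
# 2 * dim_out, and the chunked halves act as value and gate, so the output keeps dim_out.
def _geglu_shape_example() -> torch.Size:
    layer = GEGLU(dim_in=8, dim_out=16)
    return layer(torch.randn(2, 3, 8)).shape  # torch.Size([2, 3, 16])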
class ApproximateGELU(nn.Module):
r"""
The approximate form of the Gaussian Error Linear Unit (GELU). For more details, see section 2 of this
[paper](https://arxiv.org/abs/1606.08415).
Parameters:
dim_in (`int`): The number of channels in the input.
dim_out (`int`): The number of channels in the output.
bias (`bool`, defaults to True): Whether to use a bias in the linear layer.
"""
def __init__(self, dim_in: int, dim_out: int, bias: bool = True):
super().__init__()
self.proj = nn.Linear(dim_in, dim_out, bias=bias)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.proj(x)
return x * torch.sigmoid(1.702 * x)
| diffusers/src/diffusers/models/activations.py/0 | {
"file_path": "diffusers/src/diffusers/models/activations.py",
"repo_id": "diffusers",
"token_count": 1735
} | 124 |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from torch import nn
from ..utils import deprecate
from .activations import get_activation
from .attention_processor import Attention
def get_timestep_embedding(
timesteps: torch.Tensor,
embedding_dim: int,
flip_sin_to_cos: bool = False,
downscale_freq_shift: float = 1,
scale: float = 1,
max_period: int = 10000,
):
"""
This matches the implementation in Denoising Diffusion Probabilistic Models: Create sinusoidal timestep embeddings.
:param timesteps: a 1-D Tensor of N indices, one per batch element. These may be fractional.
:param embedding_dim: the dimension of the output.
:param max_period: controls the minimum frequency of the embeddings.
:return: an [N x dim] Tensor of positional embeddings.
"""
assert len(timesteps.shape) == 1, "Timesteps should be a 1d-array"
half_dim = embedding_dim // 2
exponent = -math.log(max_period) * torch.arange(
start=0, end=half_dim, dtype=torch.float32, device=timesteps.device
)
exponent = exponent / (half_dim - downscale_freq_shift)
emb = torch.exp(exponent)
emb = timesteps[:, None].float() * emb[None, :]
# scale embeddings
emb = scale * emb
# concat sine and cosine embeddings
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=-1)
# flip sine and cosine embeddings
if flip_sin_to_cos:
emb = torch.cat([emb[:, half_dim:], emb[:, :half_dim]], dim=-1)
# zero pad
if embedding_dim % 2 == 1:
emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))
return emb
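# Shape sketch (illustrative only): N timesteps map to an (N, embedding_dim) table of
# concatenated sin/cos features, with an odd embedding_dim zero-padded as handled above.
def _timestep_embedding_example() -> torch.Size:
    t = torch.tensor([0, 10, 500])
    return get_timestep_embedding(t, embedding_dim=32, flip_sin_to_cos=True).shape  # (3, 32)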
def get_2d_sincos_pos_embed(
embed_dim, grid_size, cls_token=False, extra_tokens=0, interpolation_scale=1.0, base_size=16
):
"""
grid_size: int of the grid height and width return: pos_embed: [grid_size*grid_size, embed_dim] or
[1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
"""
if isinstance(grid_size, int):
grid_size = (grid_size, grid_size)
grid_h = np.arange(grid_size[0], dtype=np.float32) / (grid_size[0] / base_size) / interpolation_scale
grid_w = np.arange(grid_size[1], dtype=np.float32) / (grid_size[1] / base_size) / interpolation_scale
grid = np.meshgrid(grid_w, grid_h) # here w goes first
grid = np.stack(grid, axis=0)
grid = grid.reshape([2, 1, grid_size[1], grid_size[0]])
pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
if cls_token and extra_tokens > 0:
pos_embed = np.concatenate([np.zeros([extra_tokens, embed_dim]), pos_embed], axis=0)
return pos_embed
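# Shape sketch (illustrative only): a 4x4 patch grid with embed_dim=16 produces a
# (16, 16) table, i.e. one positional vector per grid location.
def _sincos_pos_embed_example():
    return get_2d_sincos_pos_embed(embed_dim=16, grid_size=4).shape  # (16, 16)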
def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
if embed_dim % 2 != 0:
raise ValueError("embed_dim must be divisible by 2")
# use half of dimensions to encode grid_h
emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2)
emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2)
emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D)
return emb
def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
"""
embed_dim: output dimension for each position pos: a list of positions to be encoded: size (M,) out: (M, D)
"""
if embed_dim % 2 != 0:
raise ValueError("embed_dim must be divisible by 2")
omega = np.arange(embed_dim // 2, dtype=np.float64)
omega /= embed_dim / 2.0
omega = 1.0 / 10000**omega # (D/2,)
pos = pos.reshape(-1) # (M,)
out = np.einsum("m,d->md", pos, omega) # (M, D/2), outer product
emb_sin = np.sin(out) # (M, D/2)
emb_cos = np.cos(out) # (M, D/2)
emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D)
return emb
class PatchEmbed(nn.Module):
"""2D Image to Patch Embedding"""
def __init__(
self,
height=224,
width=224,
patch_size=16,
in_channels=3,
embed_dim=768,
layer_norm=False,
flatten=True,
bias=True,
interpolation_scale=1,
):
super().__init__()
num_patches = (height // patch_size) * (width // patch_size)
self.flatten = flatten
self.layer_norm = layer_norm
self.proj = nn.Conv2d(
in_channels, embed_dim, kernel_size=(patch_size, patch_size), stride=patch_size, bias=bias
)
if layer_norm:
self.norm = nn.LayerNorm(embed_dim, elementwise_affine=False, eps=1e-6)
else:
self.norm = None
self.patch_size = patch_size
# See:
# https://github.com/PixArt-alpha/PixArt-alpha/blob/0f55e922376d8b797edd44d25d0e7464b260dcab/diffusion/model/nets/PixArtMS.py#L161
self.height, self.width = height // patch_size, width // patch_size
self.base_size = height // patch_size
self.interpolation_scale = interpolation_scale
pos_embed = get_2d_sincos_pos_embed(
embed_dim, int(num_patches**0.5), base_size=self.base_size, interpolation_scale=self.interpolation_scale
)
self.register_buffer("pos_embed", torch.from_numpy(pos_embed).float().unsqueeze(0), persistent=False)
def forward(self, latent):
height, width = latent.shape[-2] // self.patch_size, latent.shape[-1] // self.patch_size
latent = self.proj(latent)
if self.flatten:
latent = latent.flatten(2).transpose(1, 2) # BCHW -> BNC
if self.layer_norm:
latent = self.norm(latent)
# Interpolate positional embeddings if needed.
# (For PixArt-Alpha: https://github.com/PixArt-alpha/PixArt-alpha/blob/0f55e922376d8b797edd44d25d0e7464b260dcab/diffusion/model/nets/PixArtMS.py#L162C151-L162C160)
if self.height != height or self.width != width:
pos_embed = get_2d_sincos_pos_embed(
embed_dim=self.pos_embed.shape[-1],
grid_size=(height, width),
base_size=self.base_size,
interpolation_scale=self.interpolation_scale,
)
pos_embed = torch.from_numpy(pos_embed)
pos_embed = pos_embed.float().unsqueeze(0).to(latent.device)
else:
pos_embed = self.pos_embed
return (latent + pos_embed).to(latent.dtype)
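# Shape sketch (illustrative only): (B, C, H, W) latents are patchified into
# (B, num_patches, embed_dim) tokens with the matching sin/cos positions added.
def _patch_embed_example() -> torch.Size:
    patcher = PatchEmbed(height=32, width=32, patch_size=8, in_channels=4, embed_dim=64)
    return patcher(torch.randn(1, 4, 32, 32)).shape  # torch.Size([1, 16, 64])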
class TimestepEmbedding(nn.Module):
def __init__(
self,
in_channels: int,
time_embed_dim: int,
act_fn: str = "silu",
out_dim: int = None,
post_act_fn: Optional[str] = None,
cond_proj_dim=None,
sample_proj_bias=True,
):
super().__init__()
linear_cls = nn.Linear
self.linear_1 = linear_cls(in_channels, time_embed_dim, sample_proj_bias)
if cond_proj_dim is not None:
self.cond_proj = nn.Linear(cond_proj_dim, in_channels, bias=False)
else:
self.cond_proj = None
self.act = get_activation(act_fn)
if out_dim is not None:
time_embed_dim_out = out_dim
else:
time_embed_dim_out = time_embed_dim
self.linear_2 = linear_cls(time_embed_dim, time_embed_dim_out, sample_proj_bias)
if post_act_fn is None:
self.post_act = None
else:
self.post_act = get_activation(post_act_fn)
def forward(self, sample, condition=None):
if condition is not None:
sample = sample + self.cond_proj(condition)
sample = self.linear_1(sample)
if self.act is not None:
sample = self.act(sample)
sample = self.linear_2(sample)
if self.post_act is not None:
sample = self.post_act(sample)
return sample
class Timesteps(nn.Module):
def __init__(self, num_channels: int, flip_sin_to_cos: bool, downscale_freq_shift: float):
super().__init__()
self.num_channels = num_channels
self.flip_sin_to_cos = flip_sin_to_cos
self.downscale_freq_shift = downscale_freq_shift
def forward(self, timesteps):
t_emb = get_timestep_embedding(
timesteps,
self.num_channels,
flip_sin_to_cos=self.flip_sin_to_cos,
downscale_freq_shift=self.downscale_freq_shift,
)
return t_emb
class GaussianFourierProjection(nn.Module):
"""Gaussian Fourier embeddings for noise levels."""
def __init__(
self, embedding_size: int = 256, scale: float = 1.0, set_W_to_weight=True, log=True, flip_sin_to_cos=False
):
super().__init__()
self.weight = nn.Parameter(torch.randn(embedding_size) * scale, requires_grad=False)
self.log = log
self.flip_sin_to_cos = flip_sin_to_cos
if set_W_to_weight:
# to delete later
self.W = nn.Parameter(torch.randn(embedding_size) * scale, requires_grad=False)
self.weight = self.W
def forward(self, x):
if self.log:
x = torch.log(x)
x_proj = x[:, None] * self.weight[None, :] * 2 * np.pi
if self.flip_sin_to_cos:
out = torch.cat([torch.cos(x_proj), torch.sin(x_proj)], dim=-1)
else:
out = torch.cat([torch.sin(x_proj), torch.cos(x_proj)], dim=-1)
return out
class SinusoidalPositionalEmbedding(nn.Module):
"""Apply positional information to a sequence of embeddings.
Takes in a sequence of embeddings with shape (batch_size, seq_length, embed_dim) and adds positional embeddings to
them
Args:
embed_dim: (int): Dimension of the positional embedding.
max_seq_length: Maximum sequence length to apply positional embeddings
"""
def __init__(self, embed_dim: int, max_seq_length: int = 32):
super().__init__()
position = torch.arange(max_seq_length).unsqueeze(1)
div_term = torch.exp(torch.arange(0, embed_dim, 2) * (-math.log(10000.0) / embed_dim))
pe = torch.zeros(1, max_seq_length, embed_dim)
pe[0, :, 0::2] = torch.sin(position * div_term)
pe[0, :, 1::2] = torch.cos(position * div_term)
self.register_buffer("pe", pe)
def forward(self, x):
_, seq_length, _ = x.shape
x = x + self.pe[:, :seq_length]
return x
class ImagePositionalEmbeddings(nn.Module):
"""
Converts latent image classes into vector embeddings. Sums the vector embeddings with positional embeddings for the
height and width of the latent space.
For more details, see figure 10 of the dall-e paper: https://arxiv.org/abs/2102.12092
For VQ-diffusion:
Output vector embeddings are used as input for the transformer.
Note that the vector embeddings for the transformer are different than the vector embeddings from the VQVAE.
Args:
num_embed (`int`):
Number of embeddings for the latent pixels embeddings.
height (`int`):
Height of the latent image i.e. the number of height embeddings.
width (`int`):
Width of the latent image i.e. the number of width embeddings.
embed_dim (`int`):
Dimension of the produced vector embeddings. Used for the latent pixel, height, and width embeddings.
"""
def __init__(
self,
num_embed: int,
height: int,
width: int,
embed_dim: int,
):
super().__init__()
self.height = height
self.width = width
self.num_embed = num_embed
self.embed_dim = embed_dim
self.emb = nn.Embedding(self.num_embed, embed_dim)
self.height_emb = nn.Embedding(self.height, embed_dim)
self.width_emb = nn.Embedding(self.width, embed_dim)
def forward(self, index):
emb = self.emb(index)
height_emb = self.height_emb(torch.arange(self.height, device=index.device).view(1, self.height))
# 1 x H x D -> 1 x H x 1 x D
height_emb = height_emb.unsqueeze(2)
width_emb = self.width_emb(torch.arange(self.width, device=index.device).view(1, self.width))
# 1 x W x D -> 1 x 1 x W x D
width_emb = width_emb.unsqueeze(1)
pos_emb = height_emb + width_emb
# 1 x H x W x D -> 1 x L x D
pos_emb = pos_emb.view(1, self.height * self.width, -1)
emb = emb + pos_emb[:, : emb.shape[1], :]
return emb
class LabelEmbedding(nn.Module):
"""
Embeds class labels into vector representations. Also handles label dropout for classifier-free guidance.
Args:
num_classes (`int`): The number of classes.
hidden_size (`int`): The size of the vector embeddings.
dropout_prob (`float`): The probability of dropping a label.
"""
def __init__(self, num_classes, hidden_size, dropout_prob):
super().__init__()
use_cfg_embedding = dropout_prob > 0
self.embedding_table = nn.Embedding(num_classes + use_cfg_embedding, hidden_size)
self.num_classes = num_classes
self.dropout_prob = dropout_prob
def token_drop(self, labels, force_drop_ids=None):
"""
Drops labels to enable classifier-free guidance.
"""
if force_drop_ids is None:
drop_ids = torch.rand(labels.shape[0], device=labels.device) < self.dropout_prob
else:
drop_ids = torch.tensor(force_drop_ids == 1)
labels = torch.where(drop_ids, self.num_classes, labels)
return labels
def forward(self, labels: torch.LongTensor, force_drop_ids=None):
use_dropout = self.dropout_prob > 0
if (self.training and use_dropout) or (force_drop_ids is not None):
labels = self.token_drop(labels, force_drop_ids)
embeddings = self.embedding_table(labels)
return embeddings
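# Sketch of the classifier-free guidance convention above (illustrative only): dropped
# labels are redirected to the extra embedding row at index `num_classes`.
def _label_drop_example() -> torch.Tensor:
    embedder = LabelEmbedding(num_classes=10, hidden_size=8, dropout_prob=0.1)
    labels = torch.tensor([3, 7])
    return embedder.token_drop(labels, force_drop_ids=torch.tensor([1, 0]))  # tensor([10, 7])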
class TextImageProjection(nn.Module):
def __init__(
self,
text_embed_dim: int = 1024,
image_embed_dim: int = 768,
cross_attention_dim: int = 768,
num_image_text_embeds: int = 10,
):
super().__init__()
self.num_image_text_embeds = num_image_text_embeds
self.image_embeds = nn.Linear(image_embed_dim, self.num_image_text_embeds * cross_attention_dim)
self.text_proj = nn.Linear(text_embed_dim, cross_attention_dim)
def forward(self, text_embeds: torch.FloatTensor, image_embeds: torch.FloatTensor):
batch_size = text_embeds.shape[0]
# image
image_text_embeds = self.image_embeds(image_embeds)
image_text_embeds = image_text_embeds.reshape(batch_size, self.num_image_text_embeds, -1)
# text
text_embeds = self.text_proj(text_embeds)
return torch.cat([image_text_embeds, text_embeds], dim=1)
class ImageProjection(nn.Module):
def __init__(
self,
image_embed_dim: int = 768,
cross_attention_dim: int = 768,
num_image_text_embeds: int = 32,
):
super().__init__()
self.num_image_text_embeds = num_image_text_embeds
self.image_embeds = nn.Linear(image_embed_dim, self.num_image_text_embeds * cross_attention_dim)
self.norm = nn.LayerNorm(cross_attention_dim)
def forward(self, image_embeds: torch.FloatTensor):
batch_size = image_embeds.shape[0]
# image
image_embeds = self.image_embeds(image_embeds)
image_embeds = image_embeds.reshape(batch_size, self.num_image_text_embeds, -1)
image_embeds = self.norm(image_embeds)
return image_embeds
class IPAdapterFullImageProjection(nn.Module):
def __init__(self, image_embed_dim=1024, cross_attention_dim=1024):
super().__init__()
from .attention import FeedForward
self.ff = FeedForward(image_embed_dim, cross_attention_dim, mult=1, activation_fn="gelu")
self.norm = nn.LayerNorm(cross_attention_dim)
def forward(self, image_embeds: torch.FloatTensor):
return self.norm(self.ff(image_embeds))
class CombinedTimestepLabelEmbeddings(nn.Module):
def __init__(self, num_classes, embedding_dim, class_dropout_prob=0.1):
super().__init__()
self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=1)
self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim)
self.class_embedder = LabelEmbedding(num_classes, embedding_dim, class_dropout_prob)
def forward(self, timestep, class_labels, hidden_dtype=None):
timesteps_proj = self.time_proj(timestep)
timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=hidden_dtype)) # (N, D)
class_labels = self.class_embedder(class_labels) # (N, D)
conditioning = timesteps_emb + class_labels # (N, D)
return conditioning
class TextTimeEmbedding(nn.Module):
def __init__(self, encoder_dim: int, time_embed_dim: int, num_heads: int = 64):
super().__init__()
self.norm1 = nn.LayerNorm(encoder_dim)
self.pool = AttentionPooling(num_heads, encoder_dim)
self.proj = nn.Linear(encoder_dim, time_embed_dim)
self.norm2 = nn.LayerNorm(time_embed_dim)
def forward(self, hidden_states):
hidden_states = self.norm1(hidden_states)
hidden_states = self.pool(hidden_states)
hidden_states = self.proj(hidden_states)
hidden_states = self.norm2(hidden_states)
return hidden_states
class TextImageTimeEmbedding(nn.Module):
def __init__(self, text_embed_dim: int = 768, image_embed_dim: int = 768, time_embed_dim: int = 1536):
super().__init__()
self.text_proj = nn.Linear(text_embed_dim, time_embed_dim)
self.text_norm = nn.LayerNorm(time_embed_dim)
self.image_proj = nn.Linear(image_embed_dim, time_embed_dim)
def forward(self, text_embeds: torch.FloatTensor, image_embeds: torch.FloatTensor):
# text
time_text_embeds = self.text_proj(text_embeds)
time_text_embeds = self.text_norm(time_text_embeds)
# image
time_image_embeds = self.image_proj(image_embeds)
return time_image_embeds + time_text_embeds
class ImageTimeEmbedding(nn.Module):
def __init__(self, image_embed_dim: int = 768, time_embed_dim: int = 1536):
super().__init__()
self.image_proj = nn.Linear(image_embed_dim, time_embed_dim)
self.image_norm = nn.LayerNorm(time_embed_dim)
def forward(self, image_embeds: torch.FloatTensor):
# image
time_image_embeds = self.image_proj(image_embeds)
time_image_embeds = self.image_norm(time_image_embeds)
return time_image_embeds
class ImageHintTimeEmbedding(nn.Module):
def __init__(self, image_embed_dim: int = 768, time_embed_dim: int = 1536):
super().__init__()
self.image_proj = nn.Linear(image_embed_dim, time_embed_dim)
self.image_norm = nn.LayerNorm(time_embed_dim)
self.input_hint_block = nn.Sequential(
nn.Conv2d(3, 16, 3, padding=1),
nn.SiLU(),
nn.Conv2d(16, 16, 3, padding=1),
nn.SiLU(),
nn.Conv2d(16, 32, 3, padding=1, stride=2),
nn.SiLU(),
nn.Conv2d(32, 32, 3, padding=1),
nn.SiLU(),
nn.Conv2d(32, 96, 3, padding=1, stride=2),
nn.SiLU(),
nn.Conv2d(96, 96, 3, padding=1),
nn.SiLU(),
nn.Conv2d(96, 256, 3, padding=1, stride=2),
nn.SiLU(),
nn.Conv2d(256, 4, 3, padding=1),
)
def forward(self, image_embeds: torch.FloatTensor, hint: torch.FloatTensor):
# image
time_image_embeds = self.image_proj(image_embeds)
time_image_embeds = self.image_norm(time_image_embeds)
hint = self.input_hint_block(hint)
return time_image_embeds, hint
class AttentionPooling(nn.Module):
# Copied from https://github.com/deep-floyd/IF/blob/2f91391f27dd3c468bf174be5805b4cc92980c0b/deepfloyd_if/model/nn.py#L54
def __init__(self, num_heads, embed_dim, dtype=None):
super().__init__()
self.dtype = dtype
self.positional_embedding = nn.Parameter(torch.randn(1, embed_dim) / embed_dim**0.5)
self.k_proj = nn.Linear(embed_dim, embed_dim, dtype=self.dtype)
self.q_proj = nn.Linear(embed_dim, embed_dim, dtype=self.dtype)
self.v_proj = nn.Linear(embed_dim, embed_dim, dtype=self.dtype)
self.num_heads = num_heads
self.dim_per_head = embed_dim // self.num_heads
def forward(self, x):
bs, length, width = x.size()
def shape(x):
# (bs, length, width) --> (bs, length, n_heads, dim_per_head)
x = x.view(bs, -1, self.num_heads, self.dim_per_head)
# (bs, length, n_heads, dim_per_head) --> (bs, n_heads, length, dim_per_head)
x = x.transpose(1, 2)
# (bs, n_heads, length, dim_per_head) --> (bs*n_heads, length, dim_per_head)
x = x.reshape(bs * self.num_heads, -1, self.dim_per_head)
# (bs*n_heads, length, dim_per_head) --> (bs*n_heads, dim_per_head, length)
x = x.transpose(1, 2)
return x
class_token = x.mean(dim=1, keepdim=True) + self.positional_embedding.to(x.dtype)
x = torch.cat([class_token, x], dim=1) # (bs, length+1, width)
# (bs*n_heads, class_token_length, dim_per_head)
q = shape(self.q_proj(class_token))
# (bs*n_heads, length+class_token_length, dim_per_head)
k = shape(self.k_proj(x))
v = shape(self.v_proj(x))
# (bs*n_heads, class_token_length, length+class_token_length):
scale = 1 / math.sqrt(math.sqrt(self.dim_per_head))
weight = torch.einsum("bct,bcs->bts", q * scale, k * scale) # More stable with f16 than dividing afterwards
weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)
# (bs*n_heads, dim_per_head, class_token_length)
a = torch.einsum("bts,bcs->bct", weight, v)
# (bs, length+1, width)
a = a.reshape(bs, -1, 1).transpose(1, 2)
return a[:, 0, :] # cls_token
def get_fourier_embeds_from_boundingbox(embed_dim, box):
"""
Args:
embed_dim: int
box: a 3-D tensor [B x N x 4] representing the bounding boxes for GLIGEN pipeline
Returns:
[B x N x embed_dim] tensor of positional embeddings
"""
batch_size, num_boxes = box.shape[:2]
emb = 100 ** (torch.arange(embed_dim) / embed_dim)
emb = emb[None, None, None].to(device=box.device, dtype=box.dtype)
emb = emb * box.unsqueeze(-1)
emb = torch.stack((emb.sin(), emb.cos()), dim=-1)
emb = emb.permute(0, 1, 3, 4, 2).reshape(batch_size, num_boxes, embed_dim * 2 * 4)
return emb
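# Shape sketch (illustrative only): B x N x 4 boxes expand to B x N x (embed_dim * 2 * 4)
# Fourier features, i.e. sin and cos of embed_dim frequencies per box coordinate.
def _bbox_fourier_example() -> torch.Size:
    boxes = torch.rand(2, 3, 4)
    return get_fourier_embeds_from_boundingbox(8, boxes).shape  # torch.Size([2, 3, 64])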
class GLIGENTextBoundingboxProjection(nn.Module):
def __init__(self, positive_len, out_dim, feature_type="text-only", fourier_freqs=8):
super().__init__()
self.positive_len = positive_len
self.out_dim = out_dim
self.fourier_embedder_dim = fourier_freqs
self.position_dim = fourier_freqs * 2 * 4 # 2: sin/cos, 4: xyxy
if isinstance(out_dim, tuple):
out_dim = out_dim[0]
if feature_type == "text-only":
self.linears = nn.Sequential(
nn.Linear(self.positive_len + self.position_dim, 512),
nn.SiLU(),
nn.Linear(512, 512),
nn.SiLU(),
nn.Linear(512, out_dim),
)
self.null_positive_feature = torch.nn.Parameter(torch.zeros([self.positive_len]))
elif feature_type == "text-image":
self.linears_text = nn.Sequential(
nn.Linear(self.positive_len + self.position_dim, 512),
nn.SiLU(),
nn.Linear(512, 512),
nn.SiLU(),
nn.Linear(512, out_dim),
)
self.linears_image = nn.Sequential(
nn.Linear(self.positive_len + self.position_dim, 512),
nn.SiLU(),
nn.Linear(512, 512),
nn.SiLU(),
nn.Linear(512, out_dim),
)
self.null_text_feature = torch.nn.Parameter(torch.zeros([self.positive_len]))
self.null_image_feature = torch.nn.Parameter(torch.zeros([self.positive_len]))
self.null_position_feature = torch.nn.Parameter(torch.zeros([self.position_dim]))
def forward(
self,
boxes,
masks,
positive_embeddings=None,
phrases_masks=None,
image_masks=None,
phrases_embeddings=None,
image_embeddings=None,
):
masks = masks.unsqueeze(-1)
# embed box positions (the input may include padding as a placeholder)
xyxy_embedding = get_fourier_embeds_from_boundingbox(self.fourier_embedder_dim, boxes) # B*N*4 -> B*N*C
# learnable null embedding
xyxy_null = self.null_position_feature.view(1, 1, -1)
# replace padding with learnable null embedding
xyxy_embedding = xyxy_embedding * masks + (1 - masks) * xyxy_null
# PositionNet with text-only information
if positive_embeddings is not None:
# learnable null embedding
positive_null = self.null_positive_feature.view(1, 1, -1)
# replace padding with learnable null embedding
positive_embeddings = positive_embeddings * masks + (1 - masks) * positive_null
objs = self.linears(torch.cat([positive_embeddings, xyxy_embedding], dim=-1))
# PositionNet with text and image information
else:
phrases_masks = phrases_masks.unsqueeze(-1)
image_masks = image_masks.unsqueeze(-1)
# learnable null embedding
text_null = self.null_text_feature.view(1, 1, -1)
image_null = self.null_image_feature.view(1, 1, -1)
# replace padding with learnable null embedding
phrases_embeddings = phrases_embeddings * phrases_masks + (1 - phrases_masks) * text_null
image_embeddings = image_embeddings * image_masks + (1 - image_masks) * image_null
objs_text = self.linears_text(torch.cat([phrases_embeddings, xyxy_embedding], dim=-1))
objs_image = self.linears_image(torch.cat([image_embeddings, xyxy_embedding], dim=-1))
objs = torch.cat([objs_text, objs_image], dim=1)
return objs
class PixArtAlphaCombinedTimestepSizeEmbeddings(nn.Module):
"""
For PixArt-Alpha.
Reference:
https://github.com/PixArt-alpha/PixArt-alpha/blob/0f55e922376d8b797edd44d25d0e7464b260dcab/diffusion/model/nets/PixArtMS.py#L164C9-L168C29
"""
def __init__(self, embedding_dim, size_emb_dim, use_additional_conditions: bool = False):
super().__init__()
self.outdim = size_emb_dim
self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0)
self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim)
self.use_additional_conditions = use_additional_conditions
if use_additional_conditions:
self.additional_condition_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0)
self.resolution_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=size_emb_dim)
self.aspect_ratio_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=size_emb_dim)
def forward(self, timestep, resolution, aspect_ratio, batch_size, hidden_dtype):
timesteps_proj = self.time_proj(timestep)
timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=hidden_dtype)) # (N, D)
if self.use_additional_conditions:
resolution_emb = self.additional_condition_proj(resolution.flatten()).to(hidden_dtype)
resolution_emb = self.resolution_embedder(resolution_emb).reshape(batch_size, -1)
aspect_ratio_emb = self.additional_condition_proj(aspect_ratio.flatten()).to(hidden_dtype)
aspect_ratio_emb = self.aspect_ratio_embedder(aspect_ratio_emb).reshape(batch_size, -1)
conditioning = timesteps_emb + torch.cat([resolution_emb, aspect_ratio_emb], dim=1)
else:
conditioning = timesteps_emb
return conditioning
class PixArtAlphaTextProjection(nn.Module):
"""
Projects caption embeddings. Also handles dropout for classifier-free guidance.
Adapted from https://github.com/PixArt-alpha/PixArt-alpha/blob/master/diffusion/model/nets/PixArt_blocks.py
"""
def __init__(self, in_features, hidden_size, num_tokens=120):
super().__init__()
self.linear_1 = nn.Linear(in_features=in_features, out_features=hidden_size, bias=True)
self.act_1 = nn.GELU(approximate="tanh")
self.linear_2 = nn.Linear(in_features=hidden_size, out_features=hidden_size, bias=True)
def forward(self, caption):
hidden_states = self.linear_1(caption)
hidden_states = self.act_1(hidden_states)
hidden_states = self.linear_2(hidden_states)
return hidden_states
class IPAdapterPlusImageProjection(nn.Module):
"""Resampler of IP-Adapter Plus.
Args:
----
embed_dims (int): The feature dimension. Defaults to 768.
output_dims (int): The number of output channels, that is the same
number of the channels in the
`unet.config.cross_attention_dim`. Defaults to 1024.
hidden_dims (int): The number of hidden channels. Defaults to 1280.
depth (int): The number of blocks. Defaults to 8.
dim_head (int): The number of head channels. Defaults to 64.
heads (int): Parallel attention heads. Defaults to 16.
num_queries (int): The number of queries. Defaults to 8.
ffn_ratio (float): The expansion ratio of feedforward network hidden
layer channels. Defaults to 4.
"""
def __init__(
self,
embed_dims: int = 768,
output_dims: int = 1024,
hidden_dims: int = 1280,
depth: int = 4,
dim_head: int = 64,
heads: int = 16,
num_queries: int = 8,
ffn_ratio: float = 4,
) -> None:
super().__init__()
from .attention import FeedForward # Lazy import to avoid circular import
self.latents = nn.Parameter(torch.randn(1, num_queries, hidden_dims) / hidden_dims**0.5)
self.proj_in = nn.Linear(embed_dims, hidden_dims)
self.proj_out = nn.Linear(hidden_dims, output_dims)
self.norm_out = nn.LayerNorm(output_dims)
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(
nn.ModuleList(
[
nn.LayerNorm(hidden_dims),
nn.LayerNorm(hidden_dims),
Attention(
query_dim=hidden_dims,
dim_head=dim_head,
heads=heads,
out_bias=False,
),
nn.Sequential(
nn.LayerNorm(hidden_dims),
FeedForward(hidden_dims, hidden_dims, activation_fn="gelu", mult=ffn_ratio, bias=False),
),
]
)
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Forward pass.
Args:
----
x (torch.Tensor): Input Tensor.
Returns:
-------
torch.Tensor: Output Tensor.
"""
latents = self.latents.repeat(x.size(0), 1, 1)
x = self.proj_in(x)
for ln0, ln1, attn, ff in self.layers:
residual = latents
encoder_hidden_states = ln0(x)
latents = ln1(latents)
encoder_hidden_states = torch.cat([encoder_hidden_states, latents], dim=-2)
latents = attn(latents, encoder_hidden_states) + residual
latents = ff(latents) + latents
latents = self.proj_out(latents)
return self.norm_out(latents)
class MultiIPAdapterImageProjection(nn.Module):
def __init__(self, IPAdapterImageProjectionLayers: Union[List[nn.Module], Tuple[nn.Module]]):
super().__init__()
self.image_projection_layers = nn.ModuleList(IPAdapterImageProjectionLayers)
def forward(self, image_embeds: List[torch.FloatTensor]):
projected_image_embeds = []
# currently, we accept `image_embeds` as
# 1. a tensor (deprecated) with shape [batch_size, embed_dim] or [batch_size, sequence_length, embed_dim]
# 2. list of `n` tensors where `n` is the number of ip-adapters, each tensor can have shape [batch_size, num_images, embed_dim] or [batch_size, num_images, sequence_length, embed_dim]
if not isinstance(image_embeds, list):
deprecation_message = (
"You have passed a tensor as `image_embeds`.This is deprecated and will be removed in a future release."
" Please make sure to update your script to pass `image_embeds` as a list of tensors to supress this warning."
)
deprecate("image_embeds not a list", "1.0.0", deprecation_message, standard_warn=False)
image_embeds = [image_embeds.unsqueeze(1)]
if len(image_embeds) != len(self.image_projection_layers):
raise ValueError(
f"image_embeds must have the same length as image_projection_layers, got {len(image_embeds)} and {len(self.image_projection_layers)}"
)
for image_embed, image_projection_layer in zip(image_embeds, self.image_projection_layers):
batch_size, num_images = image_embed.shape[0], image_embed.shape[1]
image_embed = image_embed.reshape((batch_size * num_images,) + image_embed.shape[2:])
image_embed = image_projection_layer(image_embed)
image_embed = image_embed.reshape((batch_size, num_images) + image_embed.shape[1:])
projected_image_embeds.append(image_embed)
return projected_image_embeds
| diffusers/src/diffusers/models/embeddings.py/0 | {
"file_path": "diffusers/src/diffusers/models/embeddings.py",
"repo_id": "diffusers",
"token_count": 15879
} | 125 |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput
class DualTransformer2DModel(nn.Module):
"""
Dual transformer wrapper that combines two `Transformer2DModel`s for mixed inference.
Parameters:
num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.
attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.
in_channels (`int`, *optional*):
Pass if the input is continuous. The number of channels in the input and output.
num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.
dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
cross_attention_dim (`int`, *optional*): The number of encoder_hidden_states dimensions to use.
sample_size (`int`, *optional*): Pass if the input is discrete. The width of the latent images.
Note that this is fixed at training time as it is used for learning a number of position embeddings. See
`ImagePositionalEmbeddings`.
num_vector_embeds (`int`, *optional*):
Pass if the input is discrete. The number of classes of the vector embeddings of the latent pixels.
Includes the class for the masked latent pixel.
activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
num_embeds_ada_norm ( `int`, *optional*): Pass if at least one of the norm_layers is `AdaLayerNorm`.
The number of diffusion steps used during training. Note that this is fixed at training time as it is used
to learn a number of embeddings that are added to the hidden states. During inference, you can denoise for
up to, but not more than, `num_embeds_ada_norm` steps.
attention_bias (`bool`, *optional*):
Configure if the TransformerBlocks' attention should contain a bias parameter.
"""
def __init__(
self,
num_attention_heads: int = 16,
attention_head_dim: int = 88,
in_channels: Optional[int] = None,
num_layers: int = 1,
dropout: float = 0.0,
norm_num_groups: int = 32,
cross_attention_dim: Optional[int] = None,
attention_bias: bool = False,
sample_size: Optional[int] = None,
num_vector_embeds: Optional[int] = None,
activation_fn: str = "geglu",
num_embeds_ada_norm: Optional[int] = None,
):
super().__init__()
self.transformers = nn.ModuleList(
[
Transformer2DModel(
num_attention_heads=num_attention_heads,
attention_head_dim=attention_head_dim,
in_channels=in_channels,
num_layers=num_layers,
dropout=dropout,
norm_num_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attention_bias=attention_bias,
sample_size=sample_size,
num_vector_embeds=num_vector_embeds,
activation_fn=activation_fn,
num_embeds_ada_norm=num_embeds_ada_norm,
)
for _ in range(2)
]
)
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
self.mix_ratio = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
self.condition_lengths = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
self.transformer_index_for_condition = [1, 0]
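# Illustrative note (editor comment): with the defaults above, a conditioning tensor of shape
# (batch, 77 + 257, features) is split in `forward` so that tokens [0:77] are routed to
# `transformers[1]` and tokens [77:334] to `transformers[0]`, matching `transformer_index_for_condition`.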
def forward(
self,
hidden_states,
encoder_hidden_states,
timestep=None,
attention_mask=None,
cross_attention_kwargs=None,
return_dict: bool = True,
):
"""
Args:
hidden_states ( When discrete, `torch.LongTensor` of shape `(batch size, num latent pixels)`.
When continuous, `torch.FloatTensor` of shape `(batch size, channel, height, width)`): Input
hidden_states.
encoder_hidden_states (`torch.FloatTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*):
Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
self-attention.
timestep ( `torch.long`, *optional*):
Optional timestep to be applied as an embedding in AdaLayerNorm's. Used to indicate denoising step.
attention_mask (`torch.FloatTensor`, *optional*):
Optional attention mask to be applied in Attention.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain tuple.
Returns:
[`~models.transformer_2d.Transformer2DModelOutput`] or `tuple`:
[`~models.transformer_2d.Transformer2DModelOutput`] if `return_dict` is True, otherwise a `tuple`. When
returning a tuple, the first element is the sample tensor.
"""
input_states = hidden_states
encoded_states = []
tokens_start = 0
# attention_mask is not used yet
for i in range(2):
# for each of the two transformers, pass the corresponding condition tokens
condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
transformer_index = self.transformer_index_for_condition[i]
encoded_state = self.transformers[transformer_index](
input_states,
encoder_hidden_states=condition_state,
timestep=timestep,
cross_attention_kwargs=cross_attention_kwargs,
return_dict=False,
)[0]
encoded_states.append(encoded_state - input_states)
tokens_start += self.condition_lengths[i]
output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
output_states = output_states + input_states
if not return_dict:
return (output_states,)
return Transformer2DModelOutput(sample=output_states)
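# Illustrative sketch (editor addition, not part of the library): the mixing rule used in
# `DualTransformer2DModel.forward`. Each transformer contributes its *delta* over the input, the
# deltas are blended with `mix_ratio`, and the input is added back. The two lambdas stand in for
# the wrapped `Transformer2DModel`s; shapes and values are arbitrary.
def _dual_transformer_mix_sketch(mix_ratio: float = 0.5):
    import torch  # local import: this module only imports `nn` at the top level
    x = torch.randn(1, 4, 8, 8)
    t0 = lambda h: h * 1.1  # stand-in for transformers[0]
    t1 = lambda h: h * 0.9  # stand-in for transformers[1]
    delta0, delta1 = t0(x) - x, t1(x) - x
    return delta0 * mix_ratio + delta1 * (1 - mix_ratio) + x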
| diffusers/src/diffusers/models/transformers/dual_transformer_2d.py/0 | {
"file_path": "diffusers/src/diffusers/models/transformers/dual_transformer_2d.py",
"repo_id": "diffusers",
"token_count": 3162
} | 126 |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.utils.checkpoint
from ...configuration_utils import ConfigMixin, register_to_config
from ...loaders import PeftAdapterMixin, UNet2DConditionLoadersMixin
from ...utils import USE_PEFT_BACKEND, BaseOutput, deprecate, logging, scale_lora_layers, unscale_lora_layers
from ..activations import get_activation
from ..attention_processor import (
ADDED_KV_ATTENTION_PROCESSORS,
CROSS_ATTENTION_PROCESSORS,
Attention,
AttentionProcessor,
AttnAddedKVProcessor,
AttnProcessor,
)
from ..embeddings import (
GaussianFourierProjection,
GLIGENTextBoundingboxProjection,
ImageHintTimeEmbedding,
ImageProjection,
ImageTimeEmbedding,
TextImageProjection,
TextImageTimeEmbedding,
TextTimeEmbedding,
TimestepEmbedding,
Timesteps,
)
from ..modeling_utils import ModelMixin
from .unet_2d_blocks import (
get_down_block,
get_mid_block,
get_up_block,
)
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class UNet2DConditionOutput(BaseOutput):
"""
The output of [`UNet2DConditionModel`].
Args:
sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
The hidden states output conditioned on `encoder_hidden_states` input. Output of last layer of model.
"""
sample: torch.FloatTensor = None
class UNet2DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin, PeftAdapterMixin):
r"""
A conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a sample
shaped output.
This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
for all models (such as downloading or saving).
Parameters:
sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):
Height and width of input/output sample.
in_channels (`int`, *optional*, defaults to 4): Number of channels in the input sample.
out_channels (`int`, *optional*, defaults to 4): Number of channels in the output.
center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample.
flip_sin_to_cos (`bool`, *optional*, defaults to `True`):
Whether to flip the sin to cos in the time embedding.
freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding.
down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`):
The tuple of downsample blocks to use.
mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock2DCrossAttn"`):
Block type for middle of UNet, it can be one of `UNetMidBlock2DCrossAttn`, `UNetMidBlock2D`, or
`UNetMidBlock2DSimpleCrossAttn`. If `None`, the mid block layer is skipped.
up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")`):
The tuple of upsample blocks to use.
only_cross_attention(`bool` or `Tuple[bool]`, *optional*, default to `False`):
Whether to include self-attention in the basic transformer blocks, see
[`~models.attention.BasicTransformerBlock`].
block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
The tuple of output channels for each block.
layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.
downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.
mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.
dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.
If `None`, normalization and activation layers are skipped in post-processing.
norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.
cross_attention_dim (`int` or `Tuple[int]`, *optional*, defaults to 1280):
The dimension of the cross attention features.
transformer_layers_per_block (`int`, `Tuple[int]`, or `Tuple[Tuple]` , *optional*, defaults to 1):
The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for
[`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`],
[`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
reverse_transformer_layers_per_block : (`Tuple[Tuple]`, *optional*, defaults to None):
The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`], in the upsampling
blocks of the U-Net. Only relevant if `transformer_layers_per_block` is of type `Tuple[Tuple]` and for
[`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`],
[`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
encoder_hid_dim (`int`, *optional*, defaults to None):
If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim`
dimension to `cross_attention_dim`.
encoder_hid_dim_type (`str`, *optional*, defaults to `None`):
If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text
embeddings of dimension `cross_attention_dim` according to `encoder_hid_dim_type`.
attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.
num_attention_heads (`int`, *optional*):
The number of attention heads. If not defined, defaults to `attention_head_dim`.
resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config
for ResNet blocks (see [`~models.resnet.ResnetBlock2D`]). Choose from `default` or `scale_shift`.
class_embed_type (`str`, *optional*, defaults to `None`):
The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`,
`"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`.
addition_embed_type (`str`, *optional*, defaults to `None`):
Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or
"text". "text" will use the `TextTimeEmbedding` layer.
addition_time_embed_dim: (`int`, *optional*, defaults to `None`):
Dimension for the timestep embeddings.
num_class_embeds (`int`, *optional*, defaults to `None`):
Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing
class conditioning with `class_embed_type` equal to `None`.
time_embedding_type (`str`, *optional*, defaults to `positional`):
The type of position embedding to use for timesteps. Choose from `positional` or `fourier`.
time_embedding_dim (`int`, *optional*, defaults to `None`):
An optional override for the dimension of the projected time embedding.
time_embedding_act_fn (`str`, *optional*, defaults to `None`):
Optional activation function to use only once on the time embeddings before they are passed to the rest of
the UNet. Choose from `silu`, `mish`, `gelu`, and `swish`.
timestep_post_act (`str`, *optional*, defaults to `None`):
The second activation function to use in timestep embedding. Choose from `silu`, `mish` and `gelu`.
time_cond_proj_dim (`int`, *optional*, defaults to `None`):
The dimension of `cond_proj` layer in the timestep embedding.
conv_in_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_in` layer.
conv_out_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_out` layer.
projection_class_embeddings_input_dim (`int`, *optional*): The dimension of the `class_labels` input when
`class_embed_type="projection"`. Required when `class_embed_type="projection"`.
class_embeddings_concat (`bool`, *optional*, defaults to `False`): Whether to concatenate the time
embeddings with the class embeddings.
mid_block_only_cross_attention (`bool`, *optional*, defaults to `None`):
Whether to use cross attention with the mid block when using the `UNetMidBlock2DSimpleCrossAttn`. If
`only_cross_attention` is given as a single boolean and `mid_block_only_cross_attention` is `None`, the
`only_cross_attention` value is used as the value for `mid_block_only_cross_attention`. Defaults to `False`
otherwise.
"""
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
sample_size: Optional[int] = None,
in_channels: int = 4,
out_channels: int = 4,
center_input_sample: bool = False,
flip_sin_to_cos: bool = True,
freq_shift: int = 0,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
),
mid_block_type: Optional[str] = "UNetMidBlock2DCrossAttn",
up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"),
only_cross_attention: Union[bool, Tuple[bool]] = False,
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: Union[int, Tuple[int]] = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
dropout: float = 0.0,
act_fn: str = "silu",
norm_num_groups: Optional[int] = 32,
norm_eps: float = 1e-5,
cross_attention_dim: Union[int, Tuple[int]] = 1280,
transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple]] = 1,
reverse_transformer_layers_per_block: Optional[Tuple[Tuple[int]]] = None,
encoder_hid_dim: Optional[int] = None,
encoder_hid_dim_type: Optional[str] = None,
attention_head_dim: Union[int, Tuple[int]] = 8,
num_attention_heads: Optional[Union[int, Tuple[int]]] = None,
dual_cross_attention: bool = False,
use_linear_projection: bool = False,
class_embed_type: Optional[str] = None,
addition_embed_type: Optional[str] = None,
addition_time_embed_dim: Optional[int] = None,
num_class_embeds: Optional[int] = None,
upcast_attention: bool = False,
resnet_time_scale_shift: str = "default",
resnet_skip_time_act: bool = False,
resnet_out_scale_factor: float = 1.0,
time_embedding_type: str = "positional",
time_embedding_dim: Optional[int] = None,
time_embedding_act_fn: Optional[str] = None,
timestep_post_act: Optional[str] = None,
time_cond_proj_dim: Optional[int] = None,
conv_in_kernel: int = 3,
conv_out_kernel: int = 3,
projection_class_embeddings_input_dim: Optional[int] = None,
attention_type: str = "default",
class_embeddings_concat: bool = False,
mid_block_only_cross_attention: Optional[bool] = None,
cross_attention_norm: Optional[str] = None,
addition_embed_type_num_heads: int = 64,
):
super().__init__()
self.sample_size = sample_size
if num_attention_heads is not None:
raise ValueError(
"At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
)
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
num_attention_heads = num_attention_heads or attention_head_dim
# Check inputs
self._check_config(
down_block_types=down_block_types,
up_block_types=up_block_types,
only_cross_attention=only_cross_attention,
block_out_channels=block_out_channels,
layers_per_block=layers_per_block,
cross_attention_dim=cross_attention_dim,
transformer_layers_per_block=transformer_layers_per_block,
reverse_transformer_layers_per_block=reverse_transformer_layers_per_block,
attention_head_dim=attention_head_dim,
num_attention_heads=num_attention_heads,
)
# input
conv_in_padding = (conv_in_kernel - 1) // 2
self.conv_in = nn.Conv2d(
in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
)
# time
time_embed_dim, timestep_input_dim = self._set_time_proj(
time_embedding_type,
block_out_channels=block_out_channels,
flip_sin_to_cos=flip_sin_to_cos,
freq_shift=freq_shift,
time_embedding_dim=time_embedding_dim,
)
self.time_embedding = TimestepEmbedding(
timestep_input_dim,
time_embed_dim,
act_fn=act_fn,
post_act_fn=timestep_post_act,
cond_proj_dim=time_cond_proj_dim,
)
self._set_encoder_hid_proj(
encoder_hid_dim_type,
cross_attention_dim=cross_attention_dim,
encoder_hid_dim=encoder_hid_dim,
)
# class embedding
self._set_class_embedding(
class_embed_type,
act_fn=act_fn,
num_class_embeds=num_class_embeds,
projection_class_embeddings_input_dim=projection_class_embeddings_input_dim,
time_embed_dim=time_embed_dim,
timestep_input_dim=timestep_input_dim,
)
self._set_add_embedding(
addition_embed_type,
addition_embed_type_num_heads=addition_embed_type_num_heads,
addition_time_embed_dim=addition_time_embed_dim,
cross_attention_dim=cross_attention_dim,
encoder_hid_dim=encoder_hid_dim,
flip_sin_to_cos=flip_sin_to_cos,
freq_shift=freq_shift,
projection_class_embeddings_input_dim=projection_class_embeddings_input_dim,
time_embed_dim=time_embed_dim,
)
if time_embedding_act_fn is None:
self.time_embed_act = None
else:
self.time_embed_act = get_activation(time_embedding_act_fn)
self.down_blocks = nn.ModuleList([])
self.up_blocks = nn.ModuleList([])
if isinstance(only_cross_attention, bool):
if mid_block_only_cross_attention is None:
mid_block_only_cross_attention = only_cross_attention
only_cross_attention = [only_cross_attention] * len(down_block_types)
if mid_block_only_cross_attention is None:
mid_block_only_cross_attention = False
if isinstance(num_attention_heads, int):
num_attention_heads = (num_attention_heads,) * len(down_block_types)
if isinstance(attention_head_dim, int):
attention_head_dim = (attention_head_dim,) * len(down_block_types)
if isinstance(cross_attention_dim, int):
cross_attention_dim = (cross_attention_dim,) * len(down_block_types)
if isinstance(layers_per_block, int):
layers_per_block = [layers_per_block] * len(down_block_types)
if isinstance(transformer_layers_per_block, int):
transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types)
if class_embeddings_concat:
# The time embeddings are concatenated with the class embeddings. The dimension of the
# time embeddings passed to the down, middle, and up blocks is twice the dimension of the
# regular time embeddings
blocks_time_embed_dim = time_embed_dim * 2
else:
blocks_time_embed_dim = time_embed_dim
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
down_block = get_down_block(
down_block_type,
num_layers=layers_per_block[i],
transformer_layers_per_block=transformer_layers_per_block[i],
in_channels=input_channel,
out_channels=output_channel,
temb_channels=blocks_time_embed_dim,
add_downsample=not is_final_block,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim[i],
num_attention_heads=num_attention_heads[i],
downsample_padding=downsample_padding,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
attention_type=attention_type,
resnet_skip_time_act=resnet_skip_time_act,
resnet_out_scale_factor=resnet_out_scale_factor,
cross_attention_norm=cross_attention_norm,
attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,
dropout=dropout,
)
self.down_blocks.append(down_block)
# mid
self.mid_block = get_mid_block(
mid_block_type,
temb_channels=blocks_time_embed_dim,
in_channels=block_out_channels[-1],
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
output_scale_factor=mid_block_scale_factor,
transformer_layers_per_block=transformer_layers_per_block[-1],
num_attention_heads=num_attention_heads[-1],
cross_attention_dim=cross_attention_dim[-1],
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
mid_block_only_cross_attention=mid_block_only_cross_attention,
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
attention_type=attention_type,
resnet_skip_time_act=resnet_skip_time_act,
cross_attention_norm=cross_attention_norm,
attention_head_dim=attention_head_dim[-1],
dropout=dropout,
)
# count how many layers upsample the images
self.num_upsamplers = 0
# up
reversed_block_out_channels = list(reversed(block_out_channels))
reversed_num_attention_heads = list(reversed(num_attention_heads))
reversed_layers_per_block = list(reversed(layers_per_block))
reversed_cross_attention_dim = list(reversed(cross_attention_dim))
reversed_transformer_layers_per_block = (
list(reversed(transformer_layers_per_block))
if reverse_transformer_layers_per_block is None
else reverse_transformer_layers_per_block
)
only_cross_attention = list(reversed(only_cross_attention))
output_channel = reversed_block_out_channels[0]
for i, up_block_type in enumerate(up_block_types):
is_final_block = i == len(block_out_channels) - 1
prev_output_channel = output_channel
output_channel = reversed_block_out_channels[i]
input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
# add upsample block for all BUT final layer
if not is_final_block:
add_upsample = True
self.num_upsamplers += 1
else:
add_upsample = False
up_block = get_up_block(
up_block_type,
num_layers=reversed_layers_per_block[i] + 1,
transformer_layers_per_block=reversed_transformer_layers_per_block[i],
in_channels=input_channel,
out_channels=output_channel,
prev_output_channel=prev_output_channel,
temb_channels=blocks_time_embed_dim,
add_upsample=add_upsample,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resolution_idx=i,
resnet_groups=norm_num_groups,
cross_attention_dim=reversed_cross_attention_dim[i],
num_attention_heads=reversed_num_attention_heads[i],
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
attention_type=attention_type,
resnet_skip_time_act=resnet_skip_time_act,
resnet_out_scale_factor=resnet_out_scale_factor,
cross_attention_norm=cross_attention_norm,
attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,
dropout=dropout,
)
self.up_blocks.append(up_block)
prev_output_channel = output_channel
# out
if norm_num_groups is not None:
self.conv_norm_out = nn.GroupNorm(
num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps
)
self.conv_act = get_activation(act_fn)
else:
self.conv_norm_out = None
self.conv_act = None
conv_out_padding = (conv_out_kernel - 1) // 2
self.conv_out = nn.Conv2d(
block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding
)
self._set_pos_net_if_use_gligen(attention_type=attention_type, cross_attention_dim=cross_attention_dim)
def _check_config(
self,
down_block_types: Tuple[str],
up_block_types: Tuple[str],
only_cross_attention: Union[bool, Tuple[bool]],
block_out_channels: Tuple[int],
layers_per_block: Union[int, Tuple[int]],
cross_attention_dim: Union[int, Tuple[int]],
transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple[int]]],
reverse_transformer_layers_per_block: bool,
attention_head_dim: int,
num_attention_heads: Optional[Union[int, Tuple[int]]],
):
if len(down_block_types) != len(up_block_types):
raise ValueError(
f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}."
)
if len(block_out_channels) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
)
if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}."
)
if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}."
)
if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}."
)
if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}."
)
if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}."
)
if isinstance(transformer_layers_per_block, list) and reverse_transformer_layers_per_block is None:
for layer_number_per_block in transformer_layers_per_block:
if isinstance(layer_number_per_block, list):
raise ValueError("Must provide 'reverse_transformer_layers_per_block` if using asymmetrical UNet.")
def _set_time_proj(
self,
time_embedding_type: str,
block_out_channels: int,
flip_sin_to_cos: bool,
freq_shift: float,
time_embedding_dim: int,
) -> Tuple[int, int]:
if time_embedding_type == "fourier":
time_embed_dim = time_embedding_dim or block_out_channels[0] * 2
if time_embed_dim % 2 != 0:
raise ValueError(f"`time_embed_dim` should be divisible by 2, but is {time_embed_dim}.")
self.time_proj = GaussianFourierProjection(
time_embed_dim // 2, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
)
timestep_input_dim = time_embed_dim
elif time_embedding_type == "positional":
time_embed_dim = time_embedding_dim or block_out_channels[0] * 4
self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
timestep_input_dim = block_out_channels[0]
else:
raise ValueError(
f"{time_embedding_type} does not exist. Please make sure to use one of `fourier` or `positional`."
)
return time_embed_dim, timestep_input_dim
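# Illustrative note (editor comment): with the default `block_out_channels=(320, 640, 1280, 1280)`
# and `time_embedding_type="positional"`, `_set_time_proj` returns `time_embed_dim = 320 * 4 = 1280`
# and `timestep_input_dim = 320`, i.e. the sinusoidal projection is 320-dimensional and is then
# expanded to 1280 by `TimestepEmbedding`. With `"fourier"`, both values equal
# `block_out_channels[0] * 2 = 640`, unless `time_embedding_dim` overrides them.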
def _set_encoder_hid_proj(
self,
encoder_hid_dim_type: Optional[str],
cross_attention_dim: Union[int, Tuple[int]],
encoder_hid_dim: Optional[int],
):
if encoder_hid_dim_type is None and encoder_hid_dim is not None:
encoder_hid_dim_type = "text_proj"
self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type)
logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.")
if encoder_hid_dim is None and encoder_hid_dim_type is not None:
raise ValueError(
f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}."
)
if encoder_hid_dim_type == "text_proj":
self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim)
elif encoder_hid_dim_type == "text_image_proj":
# image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much
# they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use
# case when `addition_embed_type == "text_image_proj"` (Kandinsky 2.1)`
self.encoder_hid_proj = TextImageProjection(
text_embed_dim=encoder_hid_dim,
image_embed_dim=cross_attention_dim,
cross_attention_dim=cross_attention_dim,
)
elif encoder_hid_dim_type == "image_proj":
# Kandinsky 2.2
self.encoder_hid_proj = ImageProjection(
image_embed_dim=encoder_hid_dim,
cross_attention_dim=cross_attention_dim,
)
elif encoder_hid_dim_type is not None:
raise ValueError(
f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'."
)
else:
self.encoder_hid_proj = None
def _set_class_embedding(
self,
class_embed_type: Optional[str],
act_fn: str,
num_class_embeds: Optional[int],
projection_class_embeddings_input_dim: Optional[int],
time_embed_dim: int,
timestep_input_dim: int,
):
if class_embed_type is None and num_class_embeds is not None:
self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
elif class_embed_type == "timestep":
self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn)
elif class_embed_type == "identity":
self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
elif class_embed_type == "projection":
if projection_class_embeddings_input_dim is None:
raise ValueError(
"`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set"
)
# The projection `class_embed_type` is the same as the timestep `class_embed_type` except
# 1. the `class_labels` inputs are not first converted to sinusoidal embeddings
# 2. it projects from an arbitrary input dimension.
#
# Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.
# When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.
# As a result, `TimestepEmbedding` can be passed arbitrary vectors.
self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
elif class_embed_type == "simple_projection":
if projection_class_embeddings_input_dim is None:
raise ValueError(
"`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set"
)
self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim)
else:
self.class_embedding = None
def _set_add_embedding(
self,
addition_embed_type: str,
addition_embed_type_num_heads: int,
addition_time_embed_dim: Optional[int],
flip_sin_to_cos: bool,
freq_shift: float,
cross_attention_dim: Optional[int],
encoder_hid_dim: Optional[int],
projection_class_embeddings_input_dim: Optional[int],
time_embed_dim: int,
):
if addition_embed_type == "text":
if encoder_hid_dim is not None:
text_time_embedding_from_dim = encoder_hid_dim
else:
text_time_embedding_from_dim = cross_attention_dim
self.add_embedding = TextTimeEmbedding(
text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads
)
elif addition_embed_type == "text_image":
# text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__ too much
# they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use
# case when `addition_embed_type == "text_image"` (Kandinsky 2.1)`
self.add_embedding = TextImageTimeEmbedding(
text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim
)
elif addition_embed_type == "text_time":
self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift)
self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
elif addition_embed_type == "image":
# Kandinsky 2.2
self.add_embedding = ImageTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim)
elif addition_embed_type == "image_hint":
# Kandinsky 2.2 ControlNet
self.add_embedding = ImageHintTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim)
elif addition_embed_type is not None:
raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.")
def _set_pos_net_if_use_gligen(self, attention_type: str, cross_attention_dim: int):
if attention_type in ["gated", "gated-text-image"]:
positive_len = 768
if isinstance(cross_attention_dim, int):
positive_len = cross_attention_dim
elif isinstance(cross_attention_dim, tuple) or isinstance(cross_attention_dim, list):
positive_len = cross_attention_dim[0]
feature_type = "text-only" if attention_type == "gated" else "text-image"
self.position_net = GLIGENTextBoundingboxProjection(
positive_len=positive_len, out_dim=cross_attention_dim, feature_type=feature_type
)
@property
def attn_processors(self) -> Dict[str, AttentionProcessor]:
r"""
Returns:
`dict` of attention processors: A dictionary containing all attention processors used in the model,
indexed by their weight names.
"""
# set recursively
processors = {}
def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
if hasattr(module, "get_processor"):
processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True)
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
return processors
for name, module in self.named_children():
fn_recursive_add_processors(name, module, processors)
return processors
def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
r"""
Sets the attention processor to use to compute attention.
Parameters:
processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
The instantiated processor class or a dictionary of processor classes that will be set as the processor
for **all** `Attention` layers.
If `processor` is a dict, the key needs to define the path to the corresponding cross attention
processor. This is strongly recommended when setting trainable attention processors.
"""
count = len(self.attn_processors.keys())
if isinstance(processor, dict) and len(processor) != count:
raise ValueError(
f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
)
def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
if hasattr(module, "set_processor"):
if not isinstance(processor, dict):
module.set_processor(processor)
else:
module.set_processor(processor.pop(f"{name}.processor"))
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
for name, module in self.named_children():
fn_recursive_attn_processor(name, module, processor)
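# Illustrative usage (editor comment): `attn_processors` and `set_attn_processor` round-trip, e.g. to
# reset every attention layer to the default processor by key (`unet` is any instantiated
# `UNet2DConditionModel`):
#   >>> procs = unet.attn_processors  # {"down_blocks.0....processor": <processor>, ...}
#   >>> unet.set_attn_processor({k: AttnProcessor() for k in procs})
# Passing a single processor instead of a dict applies it to all layers, as the docstring above notes.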
def set_default_attn_processor(self):
"""
Disables custom attention processors and sets the default attention implementation.
"""
if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
processor = AttnAddedKVProcessor()
elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
processor = AttnProcessor()
else:
raise ValueError(
f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
)
self.set_attn_processor(processor)
def set_attention_slice(self, slice_size: Union[str, int, List[int]] = "auto"):
r"""
Enable sliced attention computation.
When this option is enabled, the attention module splits the input tensor in slices to compute attention in
several steps. This is useful for saving some memory in exchange for a small decrease in speed.
Args:
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If
`"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
must be a multiple of `slice_size`.
"""
sliceable_head_dims = []
def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):
if hasattr(module, "set_attention_slice"):
sliceable_head_dims.append(module.sliceable_head_dim)
for child in module.children():
fn_recursive_retrieve_sliceable_dims(child)
# retrieve number of attention layers
for module in self.children():
fn_recursive_retrieve_sliceable_dims(module)
num_sliceable_layers = len(sliceable_head_dims)
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = [dim // 2 for dim in sliceable_head_dims]
elif slice_size == "max":
# make smallest slice possible
slice_size = num_sliceable_layers * [1]
slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
if len(slice_size) != len(sliceable_head_dims):
raise ValueError(
f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
)
for i in range(len(slice_size)):
size = slice_size[i]
dim = sliceable_head_dims[i]
if size is not None and size > dim:
raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
# Recursively walk through all the children.
# Any children which exposes the set_attention_slice method
# gets the message
def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
if hasattr(module, "set_attention_slice"):
module.set_attention_slice(slice_size.pop())
for child in module.children():
fn_recursive_set_attention_slice(child, slice_size)
reversed_slice_size = list(reversed(slice_size))
for module in self.children():
fn_recursive_set_attention_slice(module, reversed_slice_size)
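# Illustrative note (editor comment): for a model whose sliceable head dims are e.g. [8, 16],
# `set_attention_slice("auto")` resolves to slice sizes [4, 8] (half of each dim), `"max"` resolves
# to [1, 1], and an int such as 2 is broadcast to [2, 2] provided it does not exceed any head dim:
#   >>> unet.set_attention_slice("auto")
#   >>> unet.set_attention_slice(2)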
def _set_gradient_checkpointing(self, module, value=False):
if hasattr(module, "gradient_checkpointing"):
module.gradient_checkpointing = value
def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
r"""Enables the FreeU mechanism from https://arxiv.org/abs/2309.11497.
The suffixes after the scaling factors represent the stage blocks where they are being applied.
Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of values that
are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.
Args:
s1 (`float`):
Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
mitigate the "oversmoothing effect" in the enhanced denoising process.
s2 (`float`):
Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
mitigate the "oversmoothing effect" in the enhanced denoising process.
b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
"""
for i, upsample_block in enumerate(self.up_blocks):
setattr(upsample_block, "s1", s1)
setattr(upsample_block, "s2", s2)
setattr(upsample_block, "b1", b1)
setattr(upsample_block, "b2", b2)
def disable_freeu(self):
"""Disables the FreeU mechanism."""
freeu_keys = {"s1", "s2", "b1", "b2"}
for i, upsample_block in enumerate(self.up_blocks):
for k in freeu_keys:
if hasattr(upsample_block, k) or getattr(upsample_block, k, None) is not None:
setattr(upsample_block, k, None)
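# Illustrative usage (editor comment): FreeU is toggled per U-Net instance. The numbers below are
# placeholder values; see the FreeU repository linked above for settings tuned per pipeline.
#   >>> unet.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4)
#   >>> unet.disable_freeu()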
def fuse_qkv_projections(self):
"""
Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query,
key, value) are fused. For cross-attention modules, key and value projection matrices are fused.
<Tip warning={true}>
This API is 🧪 experimental.
</Tip>
"""
self.original_attn_processors = None
for _, attn_processor in self.attn_processors.items():
if "Added" in str(attn_processor.__class__.__name__):
raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.")
self.original_attn_processors = self.attn_processors
for module in self.modules():
if isinstance(module, Attention):
module.fuse_projections(fuse=True)
def unfuse_qkv_projections(self):
"""Disables the fused QKV projection if enabled.
<Tip warning={true}>
This API is 🧪 experimental.
</Tip>
"""
if self.original_attn_processors is not None:
self.set_attn_processor(self.original_attn_processors)
def unload_lora(self):
"""Unloads LoRA weights."""
deprecate(
"unload_lora",
"0.28.0",
"Calling `unload_lora()` is deprecated and will be removed in a future version. Please install `peft` and then call `disable_adapters().",
)
for module in self.modules():
if hasattr(module, "set_lora_layer"):
module.set_lora_layer(None)
def get_time_embed(
self, sample: torch.Tensor, timestep: Union[torch.Tensor, float, int]
) -> Optional[torch.Tensor]:
timesteps = timestep
if not torch.is_tensor(timesteps):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
is_mps = sample.device.type == "mps"
if isinstance(timestep, float):
dtype = torch.float32 if is_mps else torch.float64
else:
dtype = torch.int32 if is_mps else torch.int64
timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
elif len(timesteps.shape) == 0:
timesteps = timesteps[None].to(sample.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
timesteps = timesteps.expand(sample.shape[0])
t_emb = self.time_proj(timesteps)
# `Timesteps` does not contain any weights and will always return f32 tensors
# but time_embedding might actually be running in fp16. so we need to cast here.
# there might be better ways to encapsulate this.
t_emb = t_emb.to(dtype=sample.dtype)
return t_emb
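# Illustrative note (editor comment): a scalar timestep is first wrapped into a 1-element tensor and
# then expanded to the batch dimension, so e.g. with `sample` of shape (2, 4, 64, 64):
#   >>> t_emb = unet.get_time_embed(sample=sample, timestep=999)
# `timesteps` becomes tensor([999, 999]) internally, and for the default "positional" embedding
# `t_emb` has shape (2, block_out_channels[0]), cast to `sample.dtype`.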
def get_class_embed(self, sample: torch.Tensor, class_labels: Optional[torch.Tensor]) -> Optional[torch.Tensor]:
class_emb = None
if self.class_embedding is not None:
if class_labels is None:
raise ValueError("class_labels should be provided when num_class_embeds > 0")
if self.config.class_embed_type == "timestep":
class_labels = self.time_proj(class_labels)
# `Timesteps` does not contain any weights and will always return f32 tensors
# there might be better ways to encapsulate this.
class_labels = class_labels.to(dtype=sample.dtype)
class_emb = self.class_embedding(class_labels).to(dtype=sample.dtype)
return class_emb
def get_aug_embed(
self, emb: torch.Tensor, encoder_hidden_states: torch.Tensor, added_cond_kwargs: Dict[str, Any]
) -> Optional[torch.Tensor]:
aug_emb = None
if self.config.addition_embed_type == "text":
aug_emb = self.add_embedding(encoder_hidden_states)
elif self.config.addition_embed_type == "text_image":
# Kandinsky 2.1 - style
if "image_embeds" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `addition_embed_type` set to 'text_image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
)
image_embs = added_cond_kwargs.get("image_embeds")
text_embs = added_cond_kwargs.get("text_embeds", encoder_hidden_states)
aug_emb = self.add_embedding(text_embs, image_embs)
elif self.config.addition_embed_type == "text_time":
# SDXL - style
if "text_embeds" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`"
)
text_embeds = added_cond_kwargs.get("text_embeds")
if "time_ids" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`"
)
time_ids = added_cond_kwargs.get("time_ids")
time_embeds = self.add_time_proj(time_ids.flatten())
time_embeds = time_embeds.reshape((text_embeds.shape[0], -1))
add_embeds = torch.concat([text_embeds, time_embeds], dim=-1)
add_embeds = add_embeds.to(emb.dtype)
aug_emb = self.add_embedding(add_embeds)
elif self.config.addition_embed_type == "image":
# Kandinsky 2.2 - style
if "image_embeds" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `addition_embed_type` set to 'image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
)
image_embs = added_cond_kwargs.get("image_embeds")
aug_emb = self.add_embedding(image_embs)
elif self.config.addition_embed_type == "image_hint":
# Kandinsky 2.2 - style
if "image_embeds" not in added_cond_kwargs or "hint" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `addition_embed_type` set to 'image_hint' which requires the keyword arguments `image_embeds` and `hint` to be passed in `added_cond_kwargs`"
)
image_embs = added_cond_kwargs.get("image_embeds")
hint = added_cond_kwargs.get("hint")
aug_emb = self.add_embedding(image_embs, hint)
return aug_emb
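# Illustrative note (editor comment): for the SDXL-style "text_time" branch above, callers pass the
# pooled text embeddings plus packed size/crop/target-size ids, roughly:
#   >>> added_cond_kwargs = {"text_embeds": pooled_prompt_embeds,  # (batch, pooled_dim)
#   ...                      "time_ids": add_time_ids}             # (batch, num_time_ids)
# `add_time_proj` embeds each id, the result is flattened back to (batch, -1), concatenated with
# `text_embeds`, and projected by `add_embedding`; the exact widths are checkpoint-dependent.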
def process_encoder_hidden_states(
self, encoder_hidden_states: torch.Tensor, added_cond_kwargs: Dict[str, Any]
) -> torch.Tensor:
if self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_proj":
encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states)
elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_image_proj":
# Kandinsky 2.1 - style
if "image_embeds" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'text_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`"
)
image_embeds = added_cond_kwargs.get("image_embeds")
encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states, image_embeds)
elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "image_proj":
# Kandinsky 2.2 - style
if "image_embeds" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`"
)
image_embeds = added_cond_kwargs.get("image_embeds")
encoder_hidden_states = self.encoder_hid_proj(image_embeds)
elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "ip_image_proj":
if "image_embeds" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'ip_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`"
)
image_embeds = added_cond_kwargs.get("image_embeds")
image_embeds = self.encoder_hid_proj(image_embeds)
encoder_hidden_states = (encoder_hidden_states, image_embeds)
return encoder_hidden_states
def forward(
self,
sample: torch.FloatTensor,
timestep: Union[torch.Tensor, float, int],
encoder_hidden_states: torch.Tensor,
class_labels: Optional[torch.Tensor] = None,
timestep_cond: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
mid_block_additional_residual: Optional[torch.Tensor] = None,
down_intrablock_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
return_dict: bool = True,
) -> Union[UNet2DConditionOutput, Tuple]:
r"""
The [`UNet2DConditionModel`] forward method.
Args:
sample (`torch.FloatTensor`):
The noisy input tensor with the following shape `(batch, channel, height, width)`.
timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input.
encoder_hidden_states (`torch.FloatTensor`):
The encoder hidden states with shape `(batch, sequence_length, feature_dim)`.
class_labels (`torch.Tensor`, *optional*, defaults to `None`):
Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
timestep_cond: (`torch.Tensor`, *optional*, defaults to `None`):
Conditional embeddings for timestep. If provided, the embeddings will be summed with the samples passed
through the `self.time_embedding` layer to obtain the timestep embeddings.
attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
negative values to the attention scores corresponding to "discard" tokens.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
added_cond_kwargs: (`dict`, *optional*):
A kwargs dictionary containing additional embeddings that if specified are added to the embeddings that
are passed along to the UNet blocks.
down_block_additional_residuals: (`tuple` of `torch.Tensor`, *optional*):
A tuple of tensors that if specified are added to the residuals of down unet blocks.
mid_block_additional_residual: (`torch.Tensor`, *optional*):
A tensor that if specified is added to the residual of the middle unet block.
down_intrablock_additional_residuals (`tuple` of `torch.Tensor`, *optional*):
additional residuals to be added within UNet down blocks, for example from T2I-Adapter side model(s)
encoder_attention_mask (`torch.Tensor`):
A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If
`True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias,
which adds large negative values to the attention scores corresponding to "discard" tokens.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
tuple.
Returns:
[`~models.unets.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:
If `return_dict` is True, an [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] is returned, otherwise
a `tuple` is returned where the first element is the sample tensor.
"""
# By default samples have to be at least a multiple of the overall upsampling factor.
# The overall upsampling factor is equal to 2 ** (# num of upsampling layers).
# However, the upsampling interpolation output size can be forced to fit any upsampling size
# on the fly if necessary.
default_overall_up_factor = 2**self.num_upsamplers
# upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`
forward_upsample_size = False
upsample_size = None
for dim in sample.shape[-2:]:
if dim % default_overall_up_factor != 0:
# Forward upsample size to force interpolation output size.
forward_upsample_size = True
break
# ensure attention_mask is a bias, and give it a singleton query_tokens dimension
# expects mask of shape:
# [batch, key_tokens]
# adds singleton query_tokens dimension:
# [batch, 1, key_tokens]
# this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
# [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)
# [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
if attention_mask is not None:
# assume that mask is expressed as:
# (1 = keep, 0 = discard)
# convert mask into a bias that can be added to attention scores:
# (keep = +0, discard = -10000.0)
attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
attention_mask = attention_mask.unsqueeze(1)
# convert encoder_attention_mask to a bias the same way we do for attention_mask
if encoder_attention_mask is not None:
encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0
encoder_attention_mask = encoder_attention_mask.unsqueeze(1)
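# Illustrative note (editor comment): the conversion above turns a keep/discard mask into an additive
# bias, e.g. mask [[1, 1, 0]] -> bias [[[0.0, 0.0, -10000.0]]] after unsqueeze(1), which broadcasts
# over the query-token axis when added to the attention scores.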
# 0. center input if necessary
if self.config.center_input_sample:
sample = 2 * sample - 1.0
# 1. time
t_emb = self.get_time_embed(sample=sample, timestep=timestep)
emb = self.time_embedding(t_emb, timestep_cond)
aug_emb = None
class_emb = self.get_class_embed(sample=sample, class_labels=class_labels)
if class_emb is not None:
if self.config.class_embeddings_concat:
emb = torch.cat([emb, class_emb], dim=-1)
else:
emb = emb + class_emb
aug_emb = self.get_aug_embed(
emb=emb, encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs
)
if self.config.addition_embed_type == "image_hint":
aug_emb, hint = aug_emb
sample = torch.cat([sample, hint], dim=1)
emb = emb + aug_emb if aug_emb is not None else emb
if self.time_embed_act is not None:
emb = self.time_embed_act(emb)
encoder_hidden_states = self.process_encoder_hidden_states(
encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs
)
# 2. pre-process
sample = self.conv_in(sample)
# 2.5 GLIGEN position net
if cross_attention_kwargs is not None and cross_attention_kwargs.get("gligen", None) is not None:
cross_attention_kwargs = cross_attention_kwargs.copy()
gligen_args = cross_attention_kwargs.pop("gligen")
cross_attention_kwargs["gligen"] = {"objs": self.position_net(**gligen_args)}
# 3. down
# we're popping the `scale` instead of getting it because otherwise `scale` will be propagated
# to the internal blocks and will raise deprecation warnings. this will be confusing for our users.
if cross_attention_kwargs is not None:
cross_attention_kwargs = cross_attention_kwargs.copy()
lora_scale = cross_attention_kwargs.pop("scale", 1.0)
else:
lora_scale = 1.0
if USE_PEFT_BACKEND:
# weight the lora layers by setting `lora_scale` for each PEFT layer
scale_lora_layers(self, lora_scale)
is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None
# using new arg down_intrablock_additional_residuals for T2I-Adapters, to distinguish from controlnets
is_adapter = down_intrablock_additional_residuals is not None
# maintain backward compatibility for legacy usage, where
# T2I-Adapter and ControlNet both use down_block_additional_residuals arg
# but can only use one or the other
if not is_adapter and mid_block_additional_residual is None and down_block_additional_residuals is not None:
deprecate(
"T2I should not use down_block_additional_residuals",
"1.3.0",
"Passing intrablock residual connections with `down_block_additional_residuals` is deprecated \
and will be removed in diffusers 1.3.0. `down_block_additional_residuals` should only be used \
                for ControlNet. Please make sure to use `down_intrablock_additional_residuals` instead. ",
standard_warn=False,
)
down_intrablock_additional_residuals = down_block_additional_residuals
is_adapter = True
down_block_res_samples = (sample,)
for downsample_block in self.down_blocks:
if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
# For t2i-adapter CrossAttnDownBlock2D
additional_residuals = {}
if is_adapter and len(down_intrablock_additional_residuals) > 0:
additional_residuals["additional_residuals"] = down_intrablock_additional_residuals.pop(0)
sample, res_samples = downsample_block(
hidden_states=sample,
temb=emb,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
cross_attention_kwargs=cross_attention_kwargs,
encoder_attention_mask=encoder_attention_mask,
**additional_residuals,
)
else:
sample, res_samples = downsample_block(hidden_states=sample, temb=emb)
if is_adapter and len(down_intrablock_additional_residuals) > 0:
sample += down_intrablock_additional_residuals.pop(0)
down_block_res_samples += res_samples
if is_controlnet:
new_down_block_res_samples = ()
for down_block_res_sample, down_block_additional_residual in zip(
down_block_res_samples, down_block_additional_residuals
):
down_block_res_sample = down_block_res_sample + down_block_additional_residual
new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,)
down_block_res_samples = new_down_block_res_samples
# 4. mid
if self.mid_block is not None:
if hasattr(self.mid_block, "has_cross_attention") and self.mid_block.has_cross_attention:
sample = self.mid_block(
sample,
emb,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
cross_attention_kwargs=cross_attention_kwargs,
encoder_attention_mask=encoder_attention_mask,
)
else:
sample = self.mid_block(sample, emb)
# To support T2I-Adapter-XL
if (
is_adapter
and len(down_intrablock_additional_residuals) > 0
and sample.shape == down_intrablock_additional_residuals[0].shape
):
sample += down_intrablock_additional_residuals.pop(0)
if is_controlnet:
sample = sample + mid_block_additional_residual
# 5. up
for i, upsample_block in enumerate(self.up_blocks):
is_final_block = i == len(self.up_blocks) - 1
res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]
# if we have not reached the final block and need to forward the
# upsample size, we do it here
if not is_final_block and forward_upsample_size:
upsample_size = down_block_res_samples[-1].shape[2:]
if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention:
sample = upsample_block(
hidden_states=sample,
temb=emb,
res_hidden_states_tuple=res_samples,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
upsample_size=upsample_size,
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
)
else:
sample = upsample_block(
hidden_states=sample,
temb=emb,
res_hidden_states_tuple=res_samples,
upsample_size=upsample_size,
)
# 6. post-process
if self.conv_norm_out:
sample = self.conv_norm_out(sample)
sample = self.conv_act(sample)
sample = self.conv_out(sample)
if USE_PEFT_BACKEND:
# remove `lora_scale` from each PEFT layer
unscale_lora_layers(self, lora_scale)
if not return_dict:
return (sample,)
return UNet2DConditionOutput(sample=sample)
| diffusers/src/diffusers/models/unets/unet_2d_condition.py/0 | {
"file_path": "diffusers/src/diffusers/models/unets/unet_2d_condition.py",
"repo_id": "diffusers",
"token_count": 29827
} | 127 |
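The ControlNet branch above simply adds the externally supplied residuals to the stored skip connections before the up path consumes them. A minimal standalone sketch of that bookkeeping (toy tensor shapes chosen purely for illustration, not the real block layout):

```py
import torch

# Toy shapes purely for illustration; in the real pipeline these residuals come from a ControlNet.
down_block_res_samples = tuple(torch.zeros(1, 4, s, s) for s in (32, 16, 8))
down_block_additional_residuals = tuple(torch.ones_like(r) for r in down_block_res_samples)

# Mirrors the `is_controlnet` branch: each stored skip connection gets its ControlNet
# residual added before being handed to the matching up block.
new_down_block_res_samples = ()
for res, ctrl in zip(down_block_res_samples, down_block_additional_residuals):
    new_down_block_res_samples += (res + ctrl,)

print([r.mean().item() for r in new_down_block_res_samples])  # [1.0, 1.0, 1.0]
```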
from typing import TYPE_CHECKING
from ...utils import (
DIFFUSERS_SLOW_IMPORT,
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
is_transformers_available,
)
_dummy_objects = {}
_import_structure = {}
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
AmusedImg2ImgPipeline,
AmusedInpaintPipeline,
AmusedPipeline,
)
_dummy_objects.update(
{
"AmusedPipeline": AmusedPipeline,
"AmusedImg2ImgPipeline": AmusedImg2ImgPipeline,
"AmusedInpaintPipeline": AmusedInpaintPipeline,
}
)
else:
_import_structure["pipeline_amused"] = ["AmusedPipeline"]
_import_structure["pipeline_amused_img2img"] = ["AmusedImg2ImgPipeline"]
_import_structure["pipeline_amused_inpaint"] = ["AmusedInpaintPipeline"]
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
AmusedPipeline,
)
else:
from .pipeline_amused import AmusedPipeline
from .pipeline_amused_img2img import AmusedImg2ImgPipeline
from .pipeline_amused_inpaint import AmusedInpaintPipeline
else:
import sys
sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
module_spec=__spec__,
)
for name, value in _dummy_objects.items():
setattr(sys.modules[__name__], name, value)
| diffusers/src/diffusers/pipelines/amused/__init__.py/0 | {
"file_path": "diffusers/src/diffusers/pipelines/amused/__init__.py",
"repo_id": "diffusers",
"token_count": 796
} | 128 |
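The `__init__` above follows the library's lazy-import pattern: pipeline classes are only registered in `_import_structure` when `torch` and `transformers` are available, and dummy placeholders are exposed otherwise, so importing the package stays cheap. A minimal, self-contained sketch of the underlying idea (an illustrative stand-in, not diffusers' actual `_LazyModule`):

```py
import importlib
import types


class LazySubmodule(types.ModuleType):
    """Defers importing submodules until one of their attributes is first accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map "ClassName" -> "submodule that defines it"
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }
        self.__all__ = list(self._class_to_module)

    def __getattr__(self, attr):
        if attr in self._class_to_module:
            module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
            value = getattr(module, attr)
            setattr(self, attr, value)  # cache so later lookups skip __getattr__
            return value
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")


# Typical usage from a package __init__.py (requires a real package on disk):
# sys.modules[__name__] = LazySubmodule(__name__, {"pipeline_amused": ["AmusedPipeline"]})
```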
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from transformers import BertTokenizer
from transformers.activations import QuickGELUActivation as QuickGELU
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPooling,
BaseModelOutputWithPoolingAndCrossAttentions,
)
from transformers.models.blip_2.configuration_blip_2 import Blip2Config, Blip2VisionConfig
from transformers.models.blip_2.modeling_blip_2 import (
Blip2Encoder,
Blip2PreTrainedModel,
Blip2QFormerAttention,
Blip2QFormerIntermediate,
Blip2QFormerOutput,
)
from transformers.pytorch_utils import apply_chunking_to_forward
from transformers.utils import (
logging,
replace_return_docstrings,
)
logger = logging.get_logger(__name__)
# There is an implementation of Blip2 in `transformers`: https://github.com/huggingface/transformers/blob/main/src/transformers/models/blip_2/modeling_blip_2.py.
# But it doesn't support getting multimodal embeddings, so this module can be
# replaced once a future `transformers` version supports that.
class Blip2TextEmbeddings(nn.Module):
"""Construct the embeddings from word and position embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
self.config = config
def forward(
self,
input_ids=None,
position_ids=None,
query_embeds=None,
past_key_values_length=0,
):
if input_ids is not None:
seq_length = input_ids.size()[1]
else:
seq_length = 0
if position_ids is None:
position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length].clone()
if input_ids is not None:
embeddings = self.word_embeddings(input_ids)
if self.position_embedding_type == "absolute":
position_embeddings = self.position_embeddings(position_ids)
embeddings = embeddings + position_embeddings
if query_embeds is not None:
batch_size = embeddings.shape[0]
# repeat the query embeddings for batch size
query_embeds = query_embeds.repeat(batch_size, 1, 1)
embeddings = torch.cat((query_embeds, embeddings), dim=1)
else:
embeddings = query_embeds
embeddings = embeddings.to(query_embeds.dtype)
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
# Copy-pasted from transformers.models.blip.modeling_blip.BlipVisionEmbeddings with Blip->Blip2
class Blip2VisionEmbeddings(nn.Module):
def __init__(self, config: Blip2VisionConfig):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.image_size = config.image_size
self.patch_size = config.patch_size
self.class_embedding = nn.Parameter(torch.randn(1, 1, self.embed_dim))
self.patch_embedding = nn.Conv2d(
in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=False
)
self.num_patches = (self.image_size // self.patch_size) ** 2
self.num_positions = self.num_patches + 1
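        # e.g. with image_size=224 and patch_size=14 (illustrative values), this gives
        # (224 // 14) ** 2 = 256 patches plus one class token = 257 positions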
self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim))
def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
batch_size = pixel_values.shape[0]
target_dtype = self.patch_embedding.weight.dtype
patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid]
patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype)
embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
embeddings = embeddings + self.position_embedding[:, : embeddings.size(1), :].to(target_dtype)
return embeddings
# The Qformer encoder, which takes the visual embeddings, and the text input, to get multimodal embeddings
class Blip2QFormerEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList(
[Blip2QFormerLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.gradient_checkpointing = False
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
query_length=0,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions else None
next_decoder_cache = () if use_cache else None
for i in range(self.config.num_hidden_layers):
layer_module = self.layer[i]
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
past_key_value = past_key_values[i] if past_key_values is not None else None
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, past_key_value, output_attentions, query_length)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
query_length,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[-1],)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if layer_module.has_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
next_decoder_cache,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_decoder_cache,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
# The layers making up the Qformer encoder
class Blip2QFormerLayer(nn.Module):
def __init__(self, config, layer_idx):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = Blip2QFormerAttention(config)
self.layer_idx = layer_idx
if layer_idx % config.cross_attention_frequency == 0:
self.crossattention = Blip2QFormerAttention(config, is_cross_attention=True)
self.has_cross_attention = True
else:
self.has_cross_attention = False
self.intermediate = Blip2QFormerIntermediate(config)
self.intermediate_query = Blip2QFormerIntermediate(config)
self.output_query = Blip2QFormerOutput(config)
self.output = Blip2QFormerOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
query_length=0,
):
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
past_key_value=self_attn_past_key_value,
)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:-1]
present_key_value = self_attention_outputs[-1]
if query_length > 0:
query_attention_output = attention_output[:, :query_length, :]
if self.has_cross_attention:
if encoder_hidden_states is None:
raise ValueError("encoder_hidden_states must be given for cross-attention layers")
cross_attention_outputs = self.crossattention(
query_attention_output,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
output_attentions=output_attentions,
)
query_attention_output = cross_attention_outputs[0]
# add cross attentions if we output attention weights
outputs = outputs + cross_attention_outputs[1:-1]
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk_query,
self.chunk_size_feed_forward,
self.seq_len_dim,
query_attention_output,
)
if attention_output.shape[1] > query_length:
layer_output_text = apply_chunking_to_forward(
self.feed_forward_chunk,
self.chunk_size_feed_forward,
self.seq_len_dim,
attention_output[:, query_length:, :],
)
layer_output = torch.cat([layer_output, layer_output_text], dim=1)
else:
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk,
self.chunk_size_feed_forward,
self.seq_len_dim,
attention_output,
)
outputs = (layer_output,) + outputs
outputs = outputs + (present_key_value,)
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
def feed_forward_chunk_query(self, attention_output):
intermediate_output = self.intermediate_query(attention_output)
layer_output = self.output_query(intermediate_output, attention_output)
return layer_output
# ProjLayer used to project the multimodal Blip2 embeddings to be used in the text encoder
class ProjLayer(nn.Module):
def __init__(self, in_dim, out_dim, hidden_dim, drop_p=0.1, eps=1e-12):
super().__init__()
# Dense1 -> Act -> Dense2 -> Drop -> Res -> Norm
self.dense1 = nn.Linear(in_dim, hidden_dim)
self.act_fn = QuickGELU()
self.dense2 = nn.Linear(hidden_dim, out_dim)
self.dropout = nn.Dropout(drop_p)
self.LayerNorm = nn.LayerNorm(out_dim, eps=eps)
def forward(self, x):
x_in = x
x = self.LayerNorm(x)
x = self.dropout(self.dense2(self.act_fn(self.dense1(x)))) + x_in
return x
# Copy-pasted from transformers.models.blip.modeling_blip.BlipVisionModel with Blip->Blip2, BLIP->BLIP_2
class Blip2VisionModel(Blip2PreTrainedModel):
main_input_name = "pixel_values"
config_class = Blip2VisionConfig
def __init__(self, config: Blip2VisionConfig):
super().__init__(config)
self.config = config
embed_dim = config.hidden_size
self.embeddings = Blip2VisionEmbeddings(config)
self.pre_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
self.encoder = Blip2Encoder(config)
self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
self.post_init()
@replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=Blip2VisionConfig)
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutputWithPooling]:
r"""
Returns:
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
hidden_states = self.embeddings(pixel_values)
hidden_states = self.pre_layernorm(hidden_states)
encoder_outputs = self.encoder(
inputs_embeds=hidden_states,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
last_hidden_state = encoder_outputs[0]
last_hidden_state = self.post_layernorm(last_hidden_state)
pooled_output = last_hidden_state[:, 0, :]
pooled_output = self.post_layernorm(pooled_output)
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPooling(
last_hidden_state=last_hidden_state,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
def get_input_embeddings(self):
return self.embeddings
# Qformer model, used to get multimodal embeddings from the text and image inputs
class Blip2QFormerModel(Blip2PreTrainedModel):
"""
Querying Transformer (Q-Former), used in BLIP-2.
"""
def __init__(self, config: Blip2Config):
super().__init__(config)
self.config = config
self.embeddings = Blip2TextEmbeddings(config.qformer_config)
self.visual_encoder = Blip2VisionModel(config.vision_config)
self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size))
if not hasattr(config, "tokenizer") or config.tokenizer is None:
self.tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", truncation_side="right")
else:
self.tokenizer = BertTokenizer.from_pretrained(config.tokenizer, truncation_side="right")
self.tokenizer.add_special_tokens({"bos_token": "[DEC]"})
self.proj_layer = ProjLayer(
in_dim=config.qformer_config.hidden_size,
out_dim=config.qformer_config.hidden_size,
hidden_dim=config.qformer_config.hidden_size * 4,
drop_p=0.1,
eps=1e-12,
)
self.encoder = Blip2QFormerEncoder(config.qformer_config)
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def get_extended_attention_mask(
self,
attention_mask: torch.Tensor,
input_shape: Tuple[int],
device: torch.device,
has_query: bool = False,
) -> torch.Tensor:
"""
Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
Arguments:
attention_mask (`torch.Tensor`):
Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
input_shape (`Tuple[int]`):
The shape of the input to the model.
device (`torch.device`):
The device of the input to the model.
Returns:
            `torch.Tensor` The extended attention mask, with the same dtype as `attention_mask.dtype`.
"""
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
# Provided a padding mask of dimensions [batch_size, seq_length]
# - the model is an encoder, so make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(
"Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
input_shape, attention_mask.shape
)
)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
def forward(
self,
text_input=None,
image_input=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of:
shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and
value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are
used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key
value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape
`(batch_size, sequence_length)`.
use_cache (`bool`, `optional`):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
"""
text = self.tokenizer(text_input, return_tensors="pt", padding=True)
text = text.to(self.device)
input_ids = text.input_ids
batch_size = input_ids.shape[0]
query_atts = torch.ones((batch_size, self.query_tokens.size()[1]), dtype=torch.long).to(self.device)
attention_mask = torch.cat([query_atts, text.attention_mask], dim=1)
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# past_key_values_length
past_key_values_length = (
past_key_values[0][0].shape[2] - self.config.query_length if past_key_values is not None else 0
)
query_length = self.query_tokens.shape[1]
embedding_output = self.embeddings(
input_ids=input_ids,
query_embeds=self.query_tokens,
past_key_values_length=past_key_values_length,
)
# embedding_output = self.layernorm(query_embeds)
# embedding_output = self.dropout(embedding_output)
input_shape = embedding_output.size()[:-1]
batch_size, seq_length = input_shape
device = embedding_output.device
image_embeds_frozen = self.visual_encoder(image_input).last_hidden_state
# image_embeds_frozen = torch.ones_like(image_embeds_frozen)
encoder_hidden_states = image_embeds_frozen
if attention_mask is None:
            attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_hidden_states is not None:
if isinstance(encoder_hidden_states, list):
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
else:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if isinstance(encoder_attention_mask, list):
encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
elif encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.qformer_config.num_hidden_layers)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
query_length=query_length,
)
sequence_output = encoder_outputs[0]
pooled_output = sequence_output[:, 0, :]
if not return_dict:
return self.proj_layer(sequence_output[:, :query_length, :])
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
| diffusers/src/diffusers/pipelines/blip_diffusion/modeling_blip2.py/0 | {
"file_path": "diffusers/src/diffusers/pipelines/blip_diffusion/modeling_blip2.py",
"repo_id": "diffusers",
"token_count": 12075
} | 129 |
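`get_extended_attention_mask` above turns a 2D padding mask into the 4D additive bias that the attention layers expect. A small standalone sketch of the 2D case (simplified; the real method also handles 3D masks and casts to the model dtype):

```py
import torch


def make_extended_attention_mask(attention_mask: torch.Tensor, dtype=torch.float32) -> torch.Tensor:
    # [batch, seq_len] with 1 = attend, 0 = ignore  ->  [batch, 1, 1, seq_len]
    extended = attention_mask[:, None, None, :].to(dtype)
    # keep -> 0.0 bias, ignore -> -10000.0 bias added to the raw attention scores
    return (1.0 - extended) * -10000.0


mask = torch.tensor([[1, 1, 1, 0]])
bias = make_extended_attention_mask(mask)
print(bias.shape)  # torch.Size([1, 1, 1, 4]); only the padded key position gets -10000.0
```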
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging
from ...utils.torch_utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline):
r"""
Pipeline for audio generation.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
Parameters:
unet ([`UNet1DModel`]):
A `UNet1DModel` to denoise the encoded audio.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded audio latents. Can be one of
[`IPNDMScheduler`].
"""
model_cpu_offload_seq = "unet"
def __init__(self, unet, scheduler):
super().__init__()
self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
def __call__(
self,
batch_size: int = 1,
num_inference_steps: int = 100,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
audio_length_in_s: Optional[float] = None,
return_dict: bool = True,
) -> Union[AudioPipelineOutput, Tuple]:
r"""
The call function to the pipeline for generation.
Args:
batch_size (`int`, *optional*, defaults to 1):
The number of audio samples to generate.
            num_inference_steps (`int`, *optional*, defaults to 100):
The number of denoising steps. More denoising steps usually lead to a higher-quality audio sample at
the expense of slower inference.
generator (`torch.Generator`, *optional*):
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
generation deterministic.
audio_length_in_s (`float`, *optional*, defaults to `self.unet.config.sample_size/self.unet.config.sample_rate`):
The length of the generated audio sample in seconds.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.AudioPipelineOutput`] instead of a plain tuple.
Example:
```py
from diffusers import DiffusionPipeline
from scipy.io.wavfile import write
model_id = "harmonai/maestro-150k"
pipe = DiffusionPipeline.from_pretrained(model_id)
pipe = pipe.to("cuda")
audios = pipe(audio_length_in_s=4.0).audios
# To save locally
for i, audio in enumerate(audios):
write(f"maestro_test_{i}.wav", pipe.unet.sample_rate, audio.transpose())
        # To display in Google Colab
import IPython.display as ipd
for audio in audios:
display(ipd.Audio(audio, rate=pipe.unet.sample_rate))
```
Returns:
[`~pipelines.AudioPipelineOutput`] or `tuple`:
If `return_dict` is `True`, [`~pipelines.AudioPipelineOutput`] is returned, otherwise a `tuple` is
returned where the first element is a list with the generated audio.
"""
if audio_length_in_s is None:
audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
sample_size = audio_length_in_s * self.unet.config.sample_rate
down_scale_factor = 2 ** len(self.unet.up_blocks)
if sample_size < 3 * down_scale_factor:
raise ValueError(
f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
f" {3 * down_scale_factor / self.unet.config.sample_rate}."
)
original_sample_size = int(sample_size)
if sample_size % down_scale_factor != 0:
sample_size = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
" process."
)
sample_size = int(sample_size)
dtype = next(self.unet.parameters()).dtype
shape = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
audio = randn_tensor(shape, generator=generator, device=self._execution_device, dtype=dtype)
# set step values
self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)
for t in self.progress_bar(self.scheduler.timesteps):
# 1. predict noise model_output
model_output = self.unet(audio, t).sample
            # 2. compute the previous audio sample: x_t -> x_t-1
audio = self.scheduler.step(model_output, t, audio).prev_sample
audio = audio.clamp(-1, 1).float().cpu().numpy()
audio = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=audio)
| diffusers/src/diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py/0 | {
"file_path": "diffusers/src/diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py",
"repo_id": "diffusers",
"token_count": 2622
} | 130 |
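The length-rounding step in `__call__` above pads `sample_size` up to the next multiple of `2 ** len(unet.up_blocks)` so every down/upsampling stage divides cleanly, then trims the audio back to the requested length after denoising. A quick worked example with hypothetical numbers (16 kHz audio, a UNet with 6 up blocks):

```py
# Hypothetical values purely for illustration.
sample_rate = 16_000
audio_length_in_s = 0.51
down_scale_factor = 2 ** 6  # 2 ** len(unet.up_blocks)

sample_size = audio_length_in_s * sample_rate  # 8160 samples
original_sample_size = int(sample_size)

if sample_size % down_scale_factor != 0:
    # round up to the next multiple of the overall down-scale factor
    sample_size = (sample_size // down_scale_factor + 1) * down_scale_factor

print(int(sample_size), original_sample_size)  # 8192 fed to the model, trimmed back to 8160 afterwards
```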
# Deprecated Pipelines
This folder contains pipelines that have very low usage as measured by model downloads, issues and PRs. While you can still use these pipelines just as before, we will stop testing them and will not accept any changes to the existing files. | diffusers/src/diffusers/pipelines/deprecated/README.md/0 | {
"file_path": "diffusers/src/diffusers/pipelines/deprecated/README.md",
"repo_id": "diffusers",
"token_count": 54
} | 131 |
from typing import TYPE_CHECKING
from ....utils import DIFFUSERS_SLOW_IMPORT, _LazyModule
_import_structure = {"pipeline_score_sde_ve": ["ScoreSdeVePipeline"]}
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
from .pipeline_score_sde_ve import ScoreSdeVePipeline
else:
import sys
sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
module_spec=__spec__,
)
| diffusers/src/diffusers/pipelines/deprecated/score_sde_ve/__init__.py/0 | {
"file_path": "diffusers/src/diffusers/pipelines/deprecated/score_sde_ve/__init__.py",
"repo_id": "diffusers",
"token_count": 193
} | 132 |
from typing import TYPE_CHECKING
from ....utils import (
DIFFUSERS_SLOW_IMPORT,
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
_dummy_objects = {}
_import_structure = {}
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ....utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
_dummy_objects.update(
{
"VersatileDiffusionDualGuidedPipeline": VersatileDiffusionDualGuidedPipeline,
"VersatileDiffusionImageVariationPipeline": VersatileDiffusionImageVariationPipeline,
"VersatileDiffusionPipeline": VersatileDiffusionPipeline,
"VersatileDiffusionTextToImagePipeline": VersatileDiffusionTextToImagePipeline,
}
)
else:
_import_structure["modeling_text_unet"] = ["UNetFlatConditionModel"]
_import_structure["pipeline_versatile_diffusion"] = ["VersatileDiffusionPipeline"]
_import_structure["pipeline_versatile_diffusion_dual_guided"] = ["VersatileDiffusionDualGuidedPipeline"]
_import_structure["pipeline_versatile_diffusion_image_variation"] = ["VersatileDiffusionImageVariationPipeline"]
_import_structure["pipeline_versatile_diffusion_text_to_image"] = ["VersatileDiffusionTextToImagePipeline"]
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ....utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
else:
import sys
sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
module_spec=__spec__,
)
for name, value in _dummy_objects.items():
setattr(sys.modules[__name__], name, value)
| diffusers/src/diffusers/pipelines/deprecated/versatile_diffusion/__init__.py/0 | {
"file_path": "diffusers/src/diffusers/pipelines/deprecated/versatile_diffusion/__init__.py",
"repo_id": "diffusers",
"token_count": 1112
} | 133 |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, List, Optional, Union
import numpy as np
import PIL.Image
import torch
from PIL import Image
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNet2DConditionModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import (
logging,
replace_example_docstring,
)
from ...utils.torch_utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyImg2ImgPipeline, KandinskyPriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyImg2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... prompt,
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
"""
def get_new_h_w(h, w, scale_factor=8):
new_h = h // scale_factor**2
if h % scale_factor**2 != 0:
new_h += 1
new_w = w // scale_factor**2
if w % scale_factor**2 != 0:
new_w += 1
return new_h * scale_factor, new_w * scale_factor
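# e.g. get_new_h_w(768, 768) -> (96, 96): each side becomes h // scale_factor**2 cells
# (rounded up when h is not divisible by scale_factor**2), scaled back by scale_factor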
def prepare_image(pil_image, w=512, h=512):
pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
arr = np.array(pil_image.convert("RGB"))
arr = arr.astype(np.float32) / 127.5 - 1
arr = np.transpose(arr, [2, 0, 1])
image = torch.from_numpy(arr).unsqueeze(0)
return image
class KandinskyImg2ImgPipeline(DiffusionPipeline):
"""
Pipeline for image-to-image generation using Kandinsky
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
Args:
text_encoder ([`MultilingualCLIP`]):
Frozen text-encoder.
tokenizer ([`XLMRobertaTokenizer`]):
            Tokenizer of class [`XLMRobertaTokenizer`].
scheduler ([`DDIMScheduler`]):
A scheduler to be used in combination with `unet` to generate image latents.
unet ([`UNet2DConditionModel`]):
Conditional U-Net architecture to denoise the image embedding.
movq ([`VQModel`]):
MoVQ image encoder and decoder
"""
model_cpu_offload_seq = "text_encoder->unet->movq"
def __init__(
self,
text_encoder: MultilingualCLIP,
movq: VQModel,
tokenizer: XLMRobertaTokenizer,
unet: UNet2DConditionModel,
scheduler: DDIMScheduler,
):
super().__init__()
self.register_modules(
text_encoder=text_encoder,
tokenizer=tokenizer,
unet=unet,
scheduler=scheduler,
movq=movq,
)
self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
def get_timesteps(self, num_inference_steps, strength, device):
# get the original timestep using init_timestep
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
t_start = max(num_inference_steps - init_timestep, 0)
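        # e.g. num_inference_steps=100 and strength=0.3 -> init_timestep=30, t_start=70,
        # so only the last 30 scheduler timesteps are used to denoise the encoded image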
timesteps = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def prepare_latents(self, latents, latent_timestep, shape, dtype, device, generator, scheduler):
if latents is None:
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
latents = latents.to(device)
latents = latents * scheduler.init_noise_sigma
shape = latents.shape
noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
latents = self.add_noise(latents, noise, latent_timestep)
return latents
def _encode_prompt(
self,
prompt,
device,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt=None,
):
batch_size = len(prompt) if isinstance(prompt, list) else 1
# get prompt text embeddings
text_inputs = self.tokenizer(
prompt,
padding="max_length",
max_length=77,
truncation=True,
return_attention_mask=True,
add_special_tokens=True,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
)
text_input_ids = text_input_ids.to(device)
text_mask = text_inputs.attention_mask.to(device)
prompt_embeds, text_encoder_hidden_states = self.text_encoder(
input_ids=text_input_ids, attention_mask=text_mask
)
prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)
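        # e.g. two prompts with num_images_per_prompt=3 produce rows ordered
        # [p0, p0, p0, p1, p1, p1] after repeat_interleave along dim=0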
if do_classifier_free_guidance:
uncond_tokens: List[str]
if negative_prompt is None:
uncond_tokens = [""] * batch_size
elif type(prompt) is not type(negative_prompt):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
f" {type(prompt)}."
)
elif isinstance(negative_prompt, str):
uncond_tokens = [negative_prompt]
elif batch_size != len(negative_prompt):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`."
)
else:
uncond_tokens = negative_prompt
uncond_input = self.tokenizer(
uncond_tokens,
padding="max_length",
max_length=77,
truncation=True,
return_attention_mask=True,
add_special_tokens=True,
return_tensors="pt",
)
uncond_text_input_ids = uncond_input.input_ids.to(device)
uncond_text_mask = uncond_input.attention_mask.to(device)
negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask
)
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
seq_len = negative_prompt_embeds.shape[1]
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)
seq_len = uncond_text_encoder_hidden_states.shape[1]
uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt, seq_len, -1
)
uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
text_mask = torch.cat([uncond_text_mask, text_mask])
return prompt_embeds, text_encoder_hidden_states, text_mask
    # add_noise method to override the one in the scheduler because it uses a different beta schedule for adding noise vs. sampling
def add_noise(
self,
original_samples: torch.FloatTensor,
noise: torch.FloatTensor,
timesteps: torch.IntTensor,
) -> torch.FloatTensor:
betas = torch.linspace(0.0001, 0.02, 1000, dtype=torch.float32)
alphas = 1.0 - betas
alphas_cumprod = torch.cumprod(alphas, dim=0)
alphas_cumprod = alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
timesteps = timesteps.to(original_samples.device)
sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
sqrt_alpha_prod = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
prompt: Union[str, List[str]],
image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
image_embeds: torch.FloatTensor,
negative_image_embeds: torch.FloatTensor,
negative_prompt: Optional[Union[str, List[str]]] = None,
height: int = 512,
width: int = 512,
num_inference_steps: int = 100,
strength: float = 0.3,
guidance_scale: float = 7.0,
num_images_per_prompt: int = 1,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
output_type: Optional[str] = "pil",
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: int = 1,
return_dict: bool = True,
):
"""
Function invoked when calling the pipeline for generation.
Args:
prompt (`str` or `List[str]`):
The prompt or prompts to guide the image generation.
image (`torch.FloatTensor`, `PIL.Image.Image`):
`Image`, or tensor representing an image batch, that will be used as the starting point for the
process.
image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`):
                The CLIP image embeddings for the text prompt, which will be used to condition the image generation.
negative_image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`):
                The CLIP image embeddings for the negative text prompt, which will be used to condition the image generation.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
if `guidance_scale` is less than `1`).
height (`int`, *optional*, defaults to 512):
The height in pixels of the generated image.
width (`int`, *optional*, defaults to 512):
The width in pixels of the generated image.
num_inference_steps (`int`, *optional*, defaults to 100):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
strength (`float`, *optional*, defaults to 0.3):
Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
will be used as a starting point, adding more noise to it the larger the `strength`. The number of
denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
be maximum and the denoising process will run for the full number of iterations specified in
`num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
            guidance_scale (`float`, *optional*, defaults to 7.0):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
usually at the expense of lower image quality.
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
to make generation deterministic.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
(`np.array`) or `"pt"` (`torch.Tensor`).
callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function is called with the
following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function is called. If not specified, the callback is called at
every step.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
Examples:
Returns:
[`~pipelines.ImagePipelineOutput`] or `tuple`
"""
# 1. Define call parameters
if isinstance(prompt, str):
batch_size = 1
elif isinstance(prompt, list):
batch_size = len(prompt)
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
device = self._execution_device
batch_size = batch_size * num_images_per_prompt
do_classifier_free_guidance = guidance_scale > 1.0
# 2. get text and image embeddings
prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
)
if isinstance(image_embeds, list):
image_embeds = torch.cat(image_embeds, dim=0)
if isinstance(negative_image_embeds, list):
negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
if do_classifier_free_guidance:
image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
dtype=prompt_embeds.dtype, device=device
)
# 3. pre-processing initial image
if not isinstance(image, list):
image = [image]
if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
raise ValueError(
f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
)
image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
image = image.to(dtype=prompt_embeds.dtype, device=device)
latents = self.movq.encode(image)["latents"]
latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
# 4. set timesteps
self.scheduler.set_timesteps(num_inference_steps, device=device)
timesteps_tensor, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        # the formula to calculate the timestep for add_noise is taken from the original Kandinsky repo
latent_timestep = int(self.scheduler.config.num_train_timesteps * strength) - 2
latent_timestep = torch.tensor([latent_timestep] * batch_size, dtype=timesteps_tensor.dtype, device=device)
num_channels_latents = self.unet.config.in_channels
height, width = get_new_h_w(height, width, self.movq_scale_factor)
# 5. Create initial latent
latents = self.prepare_latents(
latents,
latent_timestep,
(batch_size, num_channels_latents, height, width),
text_encoder_hidden_states.dtype,
device,
generator,
self.scheduler,
)
# 6. Denoising loop
for i, t in enumerate(self.progress_bar(timesteps_tensor)):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
noise_pred = self.unet(
sample=latent_model_input,
timestep=t,
encoder_hidden_states=text_encoder_hidden_states,
added_cond_kwargs=added_cond_kwargs,
return_dict=False,
)[0]
if do_classifier_free_guidance:
noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
_, variance_pred_text = variance_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
if not (
hasattr(self.scheduler.config, "variance_type")
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(
noise_pred,
t,
latents,
generator=generator,
).prev_sample
if callback is not None and i % callback_steps == 0:
step_idx = i // getattr(self.scheduler, "order", 1)
callback(step_idx, t, latents)
# 7. post-processing
image = self.movq.decode(latents, force_not_quantize=True)["sample"]
self.maybe_free_model_hooks()
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
if output_type in ["np", "pil"]:
image = image * 0.5 + 0.5
image = image.clamp(0, 1)
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
if output_type == "pil":
image = self.numpy_to_pil(image)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=image)
| diffusers/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py/0 | {
"file_path": "diffusers/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py",
"repo_id": "diffusers",
"token_count": 9768
} | 134 |
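The custom `add_noise` above is the standard closed-form forward-diffusion step, x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise, computed with its own linear beta schedule (1e-4 to 0.02 over 1000 steps) rather than the scheduler's. A compact standalone sketch of the same computation for a single timestep (toy tensor shapes for illustration):

```py
import torch

betas = torch.linspace(0.0001, 0.02, 1000)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)


def add_noise(x0: torch.Tensor, noise: torch.Tensor, t: int) -> torch.Tensor:
    # x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise
    sqrt_alpha = alphas_cumprod[t] ** 0.5
    sqrt_one_minus_alpha = (1.0 - alphas_cumprod[t]) ** 0.5
    return sqrt_alpha * x0 + sqrt_one_minus_alpha * noise


x0 = torch.randn(1, 4, 96, 96)  # toy latent
noise = torch.randn_like(x0)
print(add_noise(x0, noise, t=299).shape)  # torch.Size([1, 4, 96, 96])
```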
import inspect
from typing import Callable, Dict, List, Optional, Union
import numpy as np
import PIL
import PIL.Image
import torch
from transformers import T5EncoderModel, T5Tokenizer
from ...loaders import LoraLoaderMixin
from ...models import Kandinsky3UNet, VQModel
from ...schedulers import DDPMScheduler
from ...utils import (
deprecate,
is_accelerate_available,
logging,
replace_example_docstring,
)
from ...utils.torch_utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import AutoPipelineForImage2Image
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe = AutoPipelineForImage2Image.from_pretrained("kandinsky-community/kandinsky-3", variant="fp16", torch_dtype=torch.float16)
>>> pipe.enable_model_cpu_offload()
>>> prompt = "A painting of the inside of a subway train with tiny raccoons."
>>> image = load_image("https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky3/t2i.png")
>>> generator = torch.Generator(device="cpu").manual_seed(0)
>>> image = pipe(prompt, image=image, strength=0.75, num_inference_steps=25, generator=generator).images[0]
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
new_height = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
new_width = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def prepare_image(pil_image):
arr = np.array(pil_image.convert("RGB"))
arr = arr.astype(np.float32) / 127.5 - 1
arr = np.transpose(arr, [2, 0, 1])
image = torch.from_numpy(arr).unsqueeze(0)
return image
class Kandinsky3Img2ImgPipeline(DiffusionPipeline, LoraLoaderMixin):
model_cpu_offload_seq = "text_encoder->movq->unet->movq"
_callback_tensor_inputs = [
"latents",
"prompt_embeds",
"negative_prompt_embeds",
"negative_attention_mask",
"attention_mask",
]
def __init__(
self,
tokenizer: T5Tokenizer,
text_encoder: T5EncoderModel,
unet: Kandinsky3UNet,
scheduler: DDPMScheduler,
movq: VQModel,
):
super().__init__()
self.register_modules(
tokenizer=tokenizer, text_encoder=text_encoder, unet=unet, scheduler=scheduler, movq=movq
)
def get_timesteps(self, num_inference_steps, strength, device):
# get the original timestep using init_timestep
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
t_start = max(num_inference_steps - init_timestep, 0)
timesteps = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
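# Illustration: with num_inference_steps=25 and strength=0.3, init_timestep = int(25 * 0.3) = 7 and
# t_start = 18, so only the last 7 scheduler timesteps are run. A higher strength keeps more timesteps
# and lets the result depart further from the input image.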
def remove_all_hooks(self):
if is_accelerate_available():
from accelerate.hooks import remove_hook_from_module
else:
raise ImportError("Please install accelerate via `pip install accelerate`")
for model in [self.text_encoder, self.unet]:
if model is not None:
remove_hook_from_module(model, recurse=True)
self.unet_offload_hook = None
self.text_encoder_offload_hook = None
self.final_offload_hook = None
def _process_embeds(self, embeddings, attention_mask, cut_context):
# return embeddings, attention_mask
if cut_context:
embeddings[attention_mask == 0] = torch.zeros_like(embeddings[attention_mask == 0])
max_seq_length = attention_mask.sum(-1).max() + 1
embeddings = embeddings[:, :max_seq_length]
attention_mask = attention_mask[:, :max_seq_length]
return embeddings, attention_mask
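# With cut_context=True, embeddings at padded positions are zeroed and both tensors are truncated to the
# longest attention span in the batch plus one token, keeping the T5 context passed to the UNet short.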
@torch.no_grad()
def encode_prompt(
self,
prompt,
do_classifier_free_guidance=True,
num_images_per_prompt=1,
device=None,
negative_prompt=None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
_cut_context=False,
attention_mask: Optional[torch.FloatTensor] = None,
negative_attention_mask: Optional[torch.FloatTensor] = None,
):
r"""
Encodes the prompt into text encoder hidden states.
Args:
prompt (`str` or `List[str]`, *optional*):
prompt to be encoded
device (`torch.device`, *optional*):
torch device to place the resulting embeddings on
num_images_per_prompt (`int`, *optional*, defaults to 1):
number of images that should be generated per prompt
do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
whether to use classifier free guidance or not
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead.
Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
attention_mask (`torch.FloatTensor`, *optional*):
Pre-generated attention mask. Must provide if passing `prompt_embeds` directly.
negative_attention_mask (`torch.FloatTensor`, *optional*):
Pre-generated negative attention mask. Must provide if passing `negative_prompt_embeds` directly.
"""
if prompt is not None and negative_prompt is not None:
if type(prompt) is not type(negative_prompt):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
f" {type(prompt)}."
)
if device is None:
device = self._execution_device
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
max_length = 128
if prompt_embeds is None:
text_inputs = self.tokenizer(
prompt,
padding="max_length",
max_length=max_length,
truncation=True,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids.to(device)
attention_mask = text_inputs.attention_mask.to(device)
prompt_embeds = self.text_encoder(
text_input_ids,
attention_mask=attention_mask,
)
prompt_embeds = prompt_embeds[0]
prompt_embeds, attention_mask = self._process_embeds(prompt_embeds, attention_mask, _cut_context)
prompt_embeds = prompt_embeds * attention_mask.unsqueeze(2)
if self.text_encoder is not None:
dtype = self.text_encoder.dtype
else:
dtype = None
prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
bs_embed, seq_len, _ = prompt_embeds.shape
# duplicate text embeddings for each generation per prompt, using mps friendly method
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
attention_mask = attention_mask.repeat(num_images_per_prompt, 1)
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance and negative_prompt_embeds is None:
uncond_tokens: List[str]
if negative_prompt is None:
uncond_tokens = [""] * batch_size
elif isinstance(negative_prompt, str):
uncond_tokens = [negative_prompt]
elif batch_size != len(negative_prompt):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`."
)
else:
uncond_tokens = negative_prompt
if negative_prompt is not None:
uncond_input = self.tokenizer(
uncond_tokens,
padding="max_length",
max_length=128,
truncation=True,
return_attention_mask=True,
return_tensors="pt",
)
text_input_ids = uncond_input.input_ids.to(device)
negative_attention_mask = uncond_input.attention_mask.to(device)
negative_prompt_embeds = self.text_encoder(
text_input_ids,
attention_mask=negative_attention_mask,
)
negative_prompt_embeds = negative_prompt_embeds[0]
negative_prompt_embeds = negative_prompt_embeds[:, : prompt_embeds.shape[1]]
negative_attention_mask = negative_attention_mask[:, : prompt_embeds.shape[1]]
negative_prompt_embeds = negative_prompt_embeds * negative_attention_mask.unsqueeze(2)
else:
negative_prompt_embeds = torch.zeros_like(prompt_embeds)
negative_attention_mask = torch.zeros_like(attention_mask)
if do_classifier_free_guidance:
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
seq_len = negative_prompt_embeds.shape[1]
negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device)
if negative_prompt_embeds.shape != prompt_embeds.shape:
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
negative_attention_mask = negative_attention_mask.repeat(num_images_per_prompt, 1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
else:
negative_prompt_embeds = None
negative_attention_mask = None
return prompt_embeds, negative_prompt_embeds, attention_mask, negative_attention_mask
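# Returned shapes (a sketch, assuming a single prompt string): prompt_embeds is
# (num_images_per_prompt, seq_len, hidden_dim) and attention_mask is (num_images_per_prompt, seq_len);
# the negative counterparts are matching tensors, or None when classifier-free guidance is disabled.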
def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
raise ValueError(
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
)
image = image.to(device=device, dtype=dtype)
batch_size = batch_size * num_images_per_prompt
if image.shape[1] == 4:
init_latents = image
else:
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
elif isinstance(generator, list):
init_latents = [
self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
]
init_latents = torch.cat(init_latents, dim=0)
else:
init_latents = self.movq.encode(image).latent_dist.sample(generator)
init_latents = self.movq.config.scaling_factor * init_latents
init_latents = torch.cat([init_latents], dim=0)
shape = init_latents.shape
noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
# get latents
init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
latents = init_latents
return latents
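# Standard img2img latent preparation: the movq-encoded image latents are scaled by the movq scaling
# factor and noised to `timestep` via scheduler.add_noise, so denoising starts from a partially noised
# version of the input rather than from pure Gaussian noise.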
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
def prepare_extra_step_kwargs(self, generator, eta):
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
# check if the scheduler accepts generator
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
extra_step_kwargs["generator"] = generator
return extra_step_kwargs
def check_inputs(
self,
prompt,
callback_steps,
negative_prompt=None,
prompt_embeds=None,
negative_prompt_embeds=None,
callback_on_step_end_tensor_inputs=None,
attention_mask=None,
negative_attention_mask=None,
):
if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
if callback_on_step_end_tensor_inputs is not None and not all(
k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
):
raise ValueError(
f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
)
if prompt is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
" only forward one of the two."
)
elif prompt is None and prompt_embeds is None:
raise ValueError(
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
)
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if negative_prompt is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
if prompt_embeds is not None and negative_prompt_embeds is not None:
if prompt_embeds.shape != negative_prompt_embeds.shape:
raise ValueError(
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
f" {negative_prompt_embeds.shape}."
)
if negative_prompt_embeds is not None and negative_attention_mask is None:
raise ValueError("Please provide `negative_attention_mask` along with `negative_prompt_embeds`")
if negative_prompt_embeds is not None and negative_attention_mask is not None:
if negative_prompt_embeds.shape[:2] != negative_attention_mask.shape:
raise ValueError(
"`negative_prompt_embeds` and `negative_attention_mask` must have the same batch_size and token length when passed directly, but"
f" got: `negative_prompt_embeds` {negative_prompt_embeds.shape[:2]} != `negative_attention_mask`"
f" {negative_attention_mask.shape}."
)
if prompt_embeds is not None and attention_mask is None:
raise ValueError("Please provide `attention_mask` along with `prompt_embeds`")
if prompt_embeds is not None and attention_mask is not None:
if prompt_embeds.shape[:2] != attention_mask.shape:
raise ValueError(
"`prompt_embeds` and `attention_mask` must have the same batch_size and token length when passed directly, but"
f" got: `prompt_embeds` {prompt_embeds.shape[:2]} != `attention_mask`"
f" {attention_mask.shape}."
)
@property
def guidance_scale(self):
return self._guidance_scale
@property
def do_classifier_free_guidance(self):
return self._guidance_scale > 1
@property
def num_timesteps(self):
return self._num_timesteps
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
prompt: Union[str, List[str]] = None,
image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] = None,
strength: float = 0.3,
num_inference_steps: int = 25,
guidance_scale: float = 3.0,
negative_prompt: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
negative_attention_mask: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
callback_on_step_end_tensor_inputs: List[str] = ["latents"],
**kwargs,
):
"""
Function invoked when calling the pipeline for generation.
Args:
prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
instead.
image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
`Image`, or tensor representing an image batch, that will be used as the starting point for the
process.
strength (`float`, *optional*, defaults to 0.3):
Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
starting point and more noise is added the higher the `strength`. The number of denoising steps depends
on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
essentially ignores `image`.
num_inference_steps (`int`, *optional*, defaults to 25):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
guidance_scale (`float`, *optional*, defaults to 3.0):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2 of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
usually at the expense of lower image quality.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
to make generation deterministic.
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
attention_mask (`torch.FloatTensor`, *optional*):
Pre-generated attention mask. Must provide if passing `prompt_embeds` directly.
negative_attention_mask (`torch.FloatTensor`, *optional*):
Pre-generated negative attention mask. Must provide if passing `negative_prompt_embeds` directly.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generate image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple.
callback_on_step_end (`Callable`, *optional*):
A function that is called at the end of each denoising step during inference. The function is called
with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
`callback_on_step_end_tensor_inputs`.
callback_on_step_end_tensor_inputs (`List`, *optional*):
The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
`._callback_tensor_inputs` attribute of your pipeline class.
Examples:
Returns:
[`~pipelines.ImagePipelineOutput`] or `tuple`
"""
callback = kwargs.pop("callback", None)
callback_steps = kwargs.pop("callback_steps", None)
if callback is not None:
deprecate(
"callback",
"1.0.0",
"Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`",
)
if callback_steps is not None:
deprecate(
"callback_steps",
"1.0.0",
"Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`",
)
if callback_on_step_end_tensor_inputs is not None and not all(
k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
):
raise ValueError(
f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
)
cut_context = True
# 1. Check inputs. Raise error if not correct
self.check_inputs(
prompt,
callback_steps,
negative_prompt,
prompt_embeds,
negative_prompt_embeds,
callback_on_step_end_tensor_inputs,
attention_mask,
negative_attention_mask,
)
self._guidance_scale = guidance_scale
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
device = self._execution_device
# 3. Encode input prompt
prompt_embeds, negative_prompt_embeds, attention_mask, negative_attention_mask = self.encode_prompt(
prompt,
self.do_classifier_free_guidance,
num_images_per_prompt=num_images_per_prompt,
device=device,
negative_prompt=negative_prompt,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
_cut_context=cut_context,
attention_mask=attention_mask,
negative_attention_mask=negative_attention_mask,
)
if self.do_classifier_free_guidance:
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
attention_mask = torch.cat([negative_attention_mask, attention_mask]).bool()
if not isinstance(image, list):
image = [image]
if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
raise ValueError(
f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
)
image = torch.cat([prepare_image(i) for i in image], dim=0)
image = image.to(dtype=prompt_embeds.dtype, device=device)
# 4. Prepare timesteps
self.scheduler.set_timesteps(num_inference_steps, device=device)
timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
# 5. Prepare latents
latents = self.movq.encode(image)["latents"]
latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
latents = self.prepare_latents(
latents, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator
)
if hasattr(self, "text_encoder_offload_hook") and self.text_encoder_offload_hook is not None:
self.text_encoder_offload_hook.offload()
# 7. Denoising loop
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
self._num_timesteps = len(timesteps)
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
# predict the noise residual
noise_pred = self.unet(
latent_model_input,
t,
encoder_hidden_states=prompt_embeds,
encoder_attention_mask=attention_mask,
)[0]
if self.do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = (guidance_scale + 1.0) * noise_pred_text - guidance_scale * noise_pred_uncond
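# Note: algebraically this equals noise_pred_uncond + (guidance_scale + 1) * (noise_pred_text - noise_pred_uncond),
# i.e. classifier-free guidance with an effective scale of guidance_scale + 1.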
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(
noise_pred,
t,
latents,
generator=generator,
).prev_sample
if callback_on_step_end is not None:
callback_kwargs = {}
for k in callback_on_step_end_tensor_inputs:
callback_kwargs[k] = locals()[k]
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
latents = callback_outputs.pop("latents", latents)
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
attention_mask = callback_outputs.pop("attention_mask", attention_mask)
negative_attention_mask = callback_outputs.pop("negative_attention_mask", negative_attention_mask)
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
progress_bar.update()
if callback is not None and i % callback_steps == 0:
step_idx = i // getattr(self.scheduler, "order", 1)
callback(step_idx, t, latents)
# post-processing
if output_type not in ["pt", "np", "pil", "latent"]:
raise ValueError(
f"Only the output types `pt`, `pil`, `np` and `latent` are supported not output_type={output_type}"
)
if not output_type == "latent":
image = self.movq.decode(latents, force_not_quantize=True)["sample"]
if output_type in ["np", "pil"]:
image = image * 0.5 + 0.5
image = image.clamp(0, 1)
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
if output_type == "pil":
image = self.numpy_to_pil(image)
else:
image = latents
self.maybe_free_model_hooks()
if not return_dict:
return (image,)
return ImagePipelineOutput(images=image)
| diffusers/src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3_img2img.py/0 | {
"file_path": "diffusers/src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3_img2img.py",
"repo_id": "diffusers",
"token_count": 14216
} | 135 |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from typing import Callable, List, Optional, Union
import numpy as np
import PIL.Image
import torch
from transformers import CLIPImageProcessor
from ...image_processor import VaeImageProcessor
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from ...utils import deprecate, logging
from ...utils.torch_utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin
from ..stable_diffusion import StableDiffusionPipelineOutput
from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from .image_encoder import PaintByExampleImageEncoder
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
def retrieve_latents(
encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
):
if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
return encoder_output.latent_dist.sample(generator)
elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
return encoder_output.latent_dist.mode()
elif hasattr(encoder_output, "latents"):
return encoder_output.latents
else:
raise AttributeError("Could not access latents of provided encoder_output")
def prepare_mask_and_masked_image(image, mask):
"""
Prepares a pair (image, mask) to be consumed by the Paint by Example pipeline. This means that those inputs will be
converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the
``image`` and ``1`` for the ``mask``.
The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be
binarized (``mask >= 0.5``) and cast to ``torch.float32`` too.
Args:
image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint.
It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width``
``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``.
mask (_type_): The mask to apply to the image, i.e. regions to inpaint.
It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width``
``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``.
Raises:
ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask
should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions.
TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not
(or the other way around).
Returns:
tuple[torch.Tensor]: The pair (mask, masked_image) as ``torch.Tensor`` with 4
dimensions: ``batch x channels x height x width``.
"""
if isinstance(image, torch.Tensor):
if not isinstance(mask, torch.Tensor):
raise TypeError(f"`image` is a torch.Tensor but `mask` (type: {type(mask)} is not")
# Batch single image
if image.ndim == 3:
assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)"
image = image.unsqueeze(0)
# Batch and add channel dim for single mask
if mask.ndim == 2:
mask = mask.unsqueeze(0).unsqueeze(0)
# Batch single mask or add channel dim
if mask.ndim == 3:
# Batched mask
if mask.shape[0] == image.shape[0]:
mask = mask.unsqueeze(1)
else:
mask = mask.unsqueeze(0)
assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions"
assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions"
assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size"
assert mask.shape[1] == 1, "Mask image must have a single channel"
# Check image is in [-1, 1]
if image.min() < -1 or image.max() > 1:
raise ValueError("Image should be in [-1, 1] range")
# Check mask is in [0, 1]
if mask.min() < 0 or mask.max() > 1:
raise ValueError("Mask should be in [0, 1] range")
# paint-by-example inverses the mask
mask = 1 - mask
# Binarize mask
mask[mask < 0.5] = 0
mask[mask >= 0.5] = 1
# Image as float32
image = image.to(dtype=torch.float32)
elif isinstance(mask, torch.Tensor):
raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)} is not")
else:
if isinstance(image, PIL.Image.Image):
image = [image]
image = np.concatenate([np.array(i.convert("RGB"))[None, :] for i in image], axis=0)
image = image.transpose(0, 3, 1, 2)
image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
# preprocess mask
if isinstance(mask, PIL.Image.Image):
mask = [mask]
mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0)
mask = mask.astype(np.float32) / 255.0
# paint-by-example inverses the mask
mask = 1 - mask
mask[mask < 0.5] = 0
mask[mask >= 0.5] = 1
mask = torch.from_numpy(mask)
masked_image = image * mask
return mask, masked_image
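# Because the mask is inverted above, a value of 1 marks pixels to keep and 0 marks the region to be
# repainted, so `masked_image = image * mask` blanks out exactly the area the pipeline will inpaint.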
class PaintByExamplePipeline(DiffusionPipeline, StableDiffusionMixin):
r"""
<Tip warning={true}>
🧪 This is an experimental feature!
</Tip>
Pipeline for image-guided image inpainting using Stable Diffusion.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
image_encoder ([`PaintByExampleImageEncoder`]):
Encodes the example input image. The `unet` is conditioned on the example image instead of a text prompt.
tokenizer ([`~transformers.CLIPTokenizer`]):
A `CLIPTokenizer` to tokenize text.
unet ([`UNet2DConditionModel`]):
A `UNet2DConditionModel` to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
safety_checker ([`StableDiffusionSafetyChecker`]):
Classification module that estimates whether generated images could be considered offensive or harmful.
Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
about a model's potential harms.
feature_extractor ([`~transformers.CLIPImageProcessor`]):
A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
"""
# TODO: feature_extractor is required to encode initial images (if they are in PIL format),
# we should give a descriptive message if the pipeline doesn't have one.
model_cpu_offload_seq = "unet->vae"
_exclude_from_cpu_offload = ["image_encoder"]
_optional_components = ["safety_checker"]
def __init__(
self,
vae: AutoencoderKL,
image_encoder: PaintByExampleImageEncoder,
unet: UNet2DConditionModel,
scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
safety_checker: StableDiffusionSafetyChecker,
feature_extractor: CLIPImageProcessor,
requires_safety_checker: bool = False,
):
super().__init__()
self.register_modules(
vae=vae,
image_encoder=image_encoder,
unet=unet,
scheduler=scheduler,
safety_checker=safety_checker,
feature_extractor=feature_extractor,
)
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
self.register_to_config(requires_safety_checker=requires_safety_checker)
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
def run_safety_checker(self, image, device, dtype):
if self.safety_checker is None:
has_nsfw_concept = None
else:
if torch.is_tensor(image):
feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
else:
feature_extractor_input = self.image_processor.numpy_to_pil(image)
safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
image, has_nsfw_concept = self.safety_checker(
images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
)
return image, has_nsfw_concept
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
def prepare_extra_step_kwargs(self, generator, eta):
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
# check if the scheduler accepts generator
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
extra_step_kwargs["generator"] = generator
return extra_step_kwargs
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
def decode_latents(self, latents):
deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)
latents = 1 / self.vae.config.scaling_factor * latents
image = self.vae.decode(latents, return_dict=False)[0]
image = (image / 2 + 0.5).clamp(0, 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
return image
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_image_variation.StableDiffusionImageVariationPipeline.check_inputs
def check_inputs(self, image, height, width, callback_steps):
if (
not isinstance(image, torch.Tensor)
and not isinstance(image, PIL.Image.Image)
and not isinstance(image, list)
):
raise ValueError(
"`image` has to be of type `torch.FloatTensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is"
f" {type(image)}"
)
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
if latents is None:
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
else:
latents = latents.to(device)
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
return latents
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint.StableDiffusionInpaintPipeline.prepare_mask_latents
def prepare_mask_latents(
self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance
):
# resize the mask to latents shape as we concatenate the mask to the latents
# we do that before converting to dtype to avoid breaking in case we're using cpu_offload
# and half precision
mask = torch.nn.functional.interpolate(
mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)
)
mask = mask.to(device=device, dtype=dtype)
masked_image = masked_image.to(device=device, dtype=dtype)
if masked_image.shape[1] == 4:
masked_image_latents = masked_image
else:
masked_image_latents = self._encode_vae_image(masked_image, generator=generator)
# duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
if mask.shape[0] < batch_size:
if not batch_size % mask.shape[0] == 0:
raise ValueError(
"The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
" of masks that you pass is divisible by the total requested batch size."
)
mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)
if masked_image_latents.shape[0] < batch_size:
if not batch_size % masked_image_latents.shape[0] == 0:
raise ValueError(
"The passed images and the required batch size don't match. Images are supposed to be duplicated"
f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
" Make sure the number of images that you pass is divisible by the total requested batch size."
)
masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1)
mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
masked_image_latents = (
torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
)
# aligning device to prevent device errors when concating it with the latent model input
masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
return mask, masked_image_latents
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint.StableDiffusionInpaintPipeline._encode_vae_image
def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator):
if isinstance(generator, list):
image_latents = [
retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i])
for i in range(image.shape[0])
]
image_latents = torch.cat(image_latents, dim=0)
else:
image_latents = retrieve_latents(self.vae.encode(image), generator=generator)
image_latents = self.vae.config.scaling_factor * image_latents
return image_latents
def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
dtype = next(self.image_encoder.parameters()).dtype
if not isinstance(image, torch.Tensor):
image = self.feature_extractor(images=image, return_tensors="pt").pixel_values
image = image.to(device=device, dtype=dtype)
image_embeddings, negative_prompt_embeds = self.image_encoder(image, return_uncond_vector=True)
# duplicate image embeddings for each generation per prompt, using mps friendly method
bs_embed, seq_len, _ = image_embeddings.shape
image_embeddings = image_embeddings.repeat(1, num_images_per_prompt, 1)
image_embeddings = image_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
if do_classifier_free_guidance:
negative_prompt_embeds = negative_prompt_embeds.repeat(1, image_embeddings.shape[0], 1)
negative_prompt_embeds = negative_prompt_embeds.view(bs_embed * num_images_per_prompt, 1, -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
image_embeddings = torch.cat([negative_prompt_embeds, image_embeddings])
return image_embeddings
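# When classifier-free guidance is active, the returned tensor stacks the unconditional embedding from
# the image encoder and the example-image embedding along the batch dimension (unconditional first),
# so the UNet covers both in a single forward pass.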
@torch.no_grad()
def __call__(
self,
example_image: Union[torch.FloatTensor, PIL.Image.Image],
image: Union[torch.FloatTensor, PIL.Image.Image],
mask_image: Union[torch.FloatTensor, PIL.Image.Image],
height: Optional[int] = None,
width: Optional[int] = None,
num_inference_steps: int = 50,
guidance_scale: float = 5.0,
negative_prompt: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: int = 1,
):
r"""
The call function to the pipeline for generation.
Args:
example_image (`torch.FloatTensor` or `PIL.Image.Image` or `List[PIL.Image.Image]`):
An example image to guide image generation.
image (`torch.FloatTensor` or `PIL.Image.Image` or `List[PIL.Image.Image]`):
`Image` or tensor representing an image batch to be inpainted (parts of the image are masked out with
`mask_image` and repainted according to `prompt`).
mask_image (`torch.FloatTensor` or `PIL.Image.Image` or `List[PIL.Image.Image]`):
`Image` or tensor representing an image batch to mask `image`. White pixels in the mask are repainted,
while black pixels are preserved. If `mask_image` is a PIL image, it is converted to a single channel
(luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, so the
expected shape would be `(B, H, W, 1)`.
height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
The height in pixels of the generated image.
width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
The width in pixels of the generated image.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
guidance_scale (`float`, *optional*, defaults to 5.0):
A higher guidance scale value encourages the model to generate images closely linked to the text
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide what to not include in image generation. If not defined, you need to
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
generation deterministic.
latents (`torch.FloatTensor`, *optional*):
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor is generated by sampling using the supplied random `generator`.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
plain tuple.
callback (`Callable`, *optional*):
A function that is called every `callback_steps` steps during inference. The function is called with the
following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function is called. If not specified, the callback is called at
every step.
Example:
```py
>>> import PIL
>>> import requests
>>> import torch
>>> from io import BytesIO
>>> from diffusers import PaintByExamplePipeline
>>> def download_image(url):
... response = requests.get(url)
... return PIL.Image.open(BytesIO(response.content)).convert("RGB")
>>> img_url = (
... "https://raw.githubusercontent.com/Fantasy-Studio/Paint-by-Example/main/examples/image/example_1.png"
... )
>>> mask_url = (
... "https://raw.githubusercontent.com/Fantasy-Studio/Paint-by-Example/main/examples/mask/example_1.png"
... )
>>> example_url = "https://raw.githubusercontent.com/Fantasy-Studio/Paint-by-Example/main/examples/reference/example_1.jpg"
>>> init_image = download_image(img_url).resize((512, 512))
>>> mask_image = download_image(mask_url).resize((512, 512))
>>> example_image = download_image(example_url).resize((512, 512))
>>> pipe = PaintByExamplePipeline.from_pretrained(
... "Fantasy-Studio/Paint-by-Example",
... torch_dtype=torch.float16,
... )
>>> pipe = pipe.to("cuda")
>>> image = pipe(image=init_image, mask_image=mask_image, example_image=example_image).images[0]
>>> image
```
Returns:
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
otherwise a `tuple` is returned where the first element is a list with the generated images and the
second element is a list of `bool`s indicating whether the corresponding generated image contains
"not-safe-for-work" (nsfw) content.
"""
# 1. Define call parameters
if isinstance(image, PIL.Image.Image):
batch_size = 1
elif isinstance(image, list):
batch_size = len(image)
else:
batch_size = image.shape[0]
device = self._execution_device
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
# 2. Preprocess mask and image
mask, masked_image = prepare_mask_and_masked_image(image, mask_image)
height, width = masked_image.shape[-2:]
# 3. Check inputs
self.check_inputs(example_image, height, width, callback_steps)
# 4. Encode input image
image_embeddings = self._encode_image(
example_image, device, num_images_per_prompt, do_classifier_free_guidance
)
# 5. set timesteps
self.scheduler.set_timesteps(num_inference_steps, device=device)
timesteps = self.scheduler.timesteps
# 6. Prepare latent variables
num_channels_latents = self.vae.config.latent_channels
latents = self.prepare_latents(
batch_size * num_images_per_prompt,
num_channels_latents,
height,
width,
image_embeddings.dtype,
device,
generator,
latents,
)
# 7. Prepare mask latent variables
mask, masked_image_latents = self.prepare_mask_latents(
mask,
masked_image,
batch_size * num_images_per_prompt,
height,
width,
image_embeddings.dtype,
device,
generator,
do_classifier_free_guidance,
)
# 8. Check that sizes of mask, masked image and latents match
num_channels_mask = mask.shape[1]
num_channels_masked_image = masked_image_latents.shape[1]
if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:
raise ValueError(
f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of"
" `pipeline.unet` or your `mask_image` or `image` input."
)
# 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 10. Denoising loop
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
# concat latents, mask, masked_image_latents in the channel dimension
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
latent_model_input = torch.cat([latent_model_input, masked_image_latents, mask], dim=1)
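# The UNet input now stacks the noisy latent channels, the masked-image latent channels and the single
# mask channel (typically 4 + 4 + 1 = 9), matching the in_channels check performed above.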
# predict the noise residual
noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=image_embeddings).sample
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
# call the callback, if provided
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
progress_bar.update()
if callback is not None and i % callback_steps == 0:
step_idx = i // getattr(self.scheduler, "order", 1)
callback(step_idx, t, latents)
self.maybe_free_model_hooks()
if not output_type == "latent":
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
image, has_nsfw_concept = self.run_safety_checker(image, device, image_embeddings.dtype)
else:
image = latents
has_nsfw_concept = None
if has_nsfw_concept is None:
do_denormalize = [True] * image.shape[0]
else:
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
| diffusers/src/diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py/0 | {
"file_path": "diffusers/src/diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py",
"repo_id": "diffusers",
"token_count": 12909
} | 136 |
from typing import TYPE_CHECKING
from ...utils import (
DIFFUSERS_SLOW_IMPORT,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_torch_available,
is_transformers_available,
)
_dummy_objects = {}
_import_structure = {}
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils import dummy_torch_and_transformers_objects
_dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
_import_structure["pipeline_stable_cascade"] = ["StableCascadeDecoderPipeline"]
_import_structure["pipeline_stable_cascade_combined"] = ["StableCascadeCombinedPipeline"]
_import_structure["pipeline_stable_cascade_prior"] = ["StableCascadePriorPipeline"]
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_stable_cascade import StableCascadeDecoderPipeline
from .pipeline_stable_cascade_combined import StableCascadeCombinedPipeline
from .pipeline_stable_cascade_prior import StableCascadePriorPipeline
else:
import sys
sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
module_spec=__spec__,
)
for name, value in _dummy_objects.items():
setattr(sys.modules[__name__], name, value)
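# Sketch of how the lazy module behaves (assuming torch and transformers are installed): an import such
# as `from diffusers.pipelines.stable_cascade import StableCascadePriorPipeline` only loads the actual
# submodule at attribute-access time via _LazyModule; when the optional dependencies are missing, dummy
# objects that raise an informative error are exposed instead.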
| diffusers/src/diffusers/pipelines/stable_cascade/__init__.py/0 | {
"file_path": "diffusers/src/diffusers/pipelines/stable_cascade/__init__.py",
"repo_id": "diffusers",
"token_count": 649
} | 137 |
# Copyright 2024 DiffEdit Authors and Pix2Pix Zero Authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import numpy as np
import PIL.Image
import torch
from packaging import version
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from ...configuration_utils import FrozenDict
from ...image_processor import VaeImageProcessor
from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin
from ...models import AutoencoderKL, UNet2DConditionModel
from ...models.lora import adjust_lora_scale_text_encoder
from ...schedulers import DDIMInverseScheduler, KarrasDiffusionSchedulers
from ...utils import (
PIL_INTERPOLATION,
USE_PEFT_BACKEND,
BaseOutput,
deprecate,
logging,
replace_example_docstring,
scale_lora_layers,
unscale_lora_layers,
)
from ...utils.torch_utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin
from ..stable_diffusion import StableDiffusionPipelineOutput
from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class DiffEditInversionPipelineOutput(BaseOutput):
"""
Output class for Stable Diffusion pipelines.
Args:
latents (`torch.FloatTensor`)
inverted latents tensor
images (`List[PIL.Image.Image]` or `np.ndarray`)
List of denoised PIL images of length `num_timesteps * batch_size` or numpy array of shape `(num_timesteps,
batch_size, height, width, num_channels)`. PIL images or numpy array present the denoised images of the
diffusion pipeline.
"""
latents: torch.FloatTensor
images: Union[List[PIL.Image.Image], np.ndarray]
EXAMPLE_DOC_STRING = """
```py
>>> import PIL
>>> import requests
>>> import torch
>>> from io import BytesIO
>>> from diffusers import DDIMInverseScheduler, DDIMScheduler, StableDiffusionDiffEditPipeline
>>> def download_image(url):
... response = requests.get(url)
... return PIL.Image.open(BytesIO(response.content)).convert("RGB")
>>> img_url = "https://github.com/Xiang-cd/DiffEdit-stable-diffusion/raw/main/assets/origin.png"
>>> init_image = download_image(img_url).resize((768, 768))
>>> pipe = StableDiffusionDiffEditPipeline.from_pretrained(
... "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16
... )
>>> pipe = pipe.to("cuda")
>>> pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
>>> pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
>>> pipe.enable_model_cpu_offload()
>>> mask_prompt = "A bowl of fruits"
>>> prompt = "A bowl of pears"
>>> mask_image = pipe.generate_mask(image=init_image, source_prompt=prompt, target_prompt=mask_prompt)
>>> image_latents = pipe.invert(image=init_image, prompt=mask_prompt).latents
>>> image = pipe(prompt=prompt, mask_image=mask_image, image_latents=image_latents).images[0]
```
"""
EXAMPLE_INVERT_DOC_STRING = """
```py
>>> import PIL
>>> import requests
>>> import torch
>>> from io import BytesIO
>>> from diffusers import DDIMInverseScheduler, DDIMScheduler, StableDiffusionDiffEditPipeline
>>> def download_image(url):
... response = requests.get(url)
... return PIL.Image.open(BytesIO(response.content)).convert("RGB")
>>> img_url = "https://github.com/Xiang-cd/DiffEdit-stable-diffusion/raw/main/assets/origin.png"
>>> init_image = download_image(img_url).resize((768, 768))
>>> pipe = StableDiffusionDiffEditPipeline.from_pretrained(
... "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16
... )
>>> pipe = pipe.to("cuda")
>>> pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
>>> pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
>>> pipe.enable_model_cpu_offload()
>>> prompt = "A bowl of fruits"
>>> inverted_latents = pipe.invert(image=init_image, prompt=prompt).latents
```
"""
def auto_corr_loss(hidden_states, generator=None):
reg_loss = 0.0
for i in range(hidden_states.shape[0]):
for j in range(hidden_states.shape[1]):
noise = hidden_states[i : i + 1, j : j + 1, :, :]
while True:
roll_amount = torch.randint(noise.shape[2] // 2, (1,), generator=generator).item()
reg_loss += (noise * torch.roll(noise, shifts=roll_amount, dims=2)).mean() ** 2
reg_loss += (noise * torch.roll(noise, shifts=roll_amount, dims=3)).mean() ** 2
if noise.shape[2] <= 8:
break
noise = torch.nn.functional.avg_pool2d(noise, kernel_size=2)
return reg_loss
def kl_divergence(hidden_states):
return hidden_states.var() + hidden_states.mean() ** 2 - 1 - torch.log(hidden_states.var() + 1e-7)
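# These two regularizers are used to keep the predicted noise close to a standard Gaussian during DDIM
# inversion: auto_corr_loss penalizes spatial auto-correlation (via randomly rolled copies at several
# average-pooling scales) and kl_divergence pushes the mean and variance toward those of N(0, 1).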
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.preprocess
def preprocess(image):
deprecation_message = "The preprocess method is deprecated and will be removed in diffusers 1.0.0. Please use VaeImageProcessor.preprocess(...) instead"
deprecate("preprocess", "1.0.0", deprecation_message, standard_warn=False)
if isinstance(image, torch.Tensor):
return image
elif isinstance(image, PIL.Image.Image):
image = [image]
if isinstance(image[0], PIL.Image.Image):
w, h = image[0].size
w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
image = np.concatenate(image, axis=0)
image = np.array(image).astype(np.float32) / 255.0
image = image.transpose(0, 3, 1, 2)
image = 2.0 * image - 1.0
image = torch.from_numpy(image)
elif isinstance(image[0], torch.Tensor):
image = torch.cat(image, dim=0)
return image
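# Converts a mask given as PIL image(s), numpy array(s) or tensor(s) into a float tensor of
# shape (batch, 1, H, W): a single mask is broadcast to `batch_size`, the values are checked
# to lie in [0, 1] and to have a single channel, and the mask is binarized at 0.5.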
def preprocess_mask(mask, batch_size: int = 1):
if not isinstance(mask, torch.Tensor):
# preprocess mask
if isinstance(mask, PIL.Image.Image) or isinstance(mask, np.ndarray):
mask = [mask]
if isinstance(mask, list):
if isinstance(mask[0], PIL.Image.Image):
mask = [np.array(m.convert("L")).astype(np.float32) / 255.0 for m in mask]
if isinstance(mask[0], np.ndarray):
mask = np.stack(mask, axis=0) if mask[0].ndim < 3 else np.concatenate(mask, axis=0)
mask = torch.from_numpy(mask)
elif isinstance(mask[0], torch.Tensor):
mask = torch.stack(mask, dim=0) if mask[0].ndim < 3 else torch.cat(mask, dim=0)
# Batch and add channel dim for single mask
if mask.ndim == 2:
mask = mask.unsqueeze(0).unsqueeze(0)
# Batch single mask or add channel dim
if mask.ndim == 3:
# Single batched mask, no channel dim or single mask not batched but channel dim
if mask.shape[0] == 1:
mask = mask.unsqueeze(0)
# Batched masks no channel dim
else:
mask = mask.unsqueeze(1)
# Check mask shape
if batch_size > 1:
if mask.shape[0] == 1:
mask = torch.cat([mask] * batch_size)
elif mask.shape[0] > 1 and mask.shape[0] != batch_size:
raise ValueError(
f"`mask_image` with batch size {mask.shape[0]} cannot be broadcasted to batch size {batch_size} "
f"inferred by prompt inputs"
)
if mask.shape[1] != 1:
raise ValueError(f"`mask_image` must have 1 channel, but has {mask.shape[1]} channels")
# Check mask is in [0, 1]
if mask.min() < 0 or mask.max() > 1:
raise ValueError("`mask_image` should be in [0, 1] range")
# Binarize mask
mask[mask < 0.5] = 0
mask[mask >= 0.5] = 1
return mask
class StableDiffusionDiffEditPipeline(
DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin
):
r"""
<Tip warning={true}>
This is an experimental feature!
</Tip>
Pipeline for text-guided image inpainting using Stable Diffusion and DiffEdit.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
The pipeline also inherits the following loading and saving methods:
- [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
- [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
- [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
text_encoder ([`~transformers.CLIPTextModel`]):
Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
tokenizer ([`~transformers.CLIPTokenizer`]):
A `CLIPTokenizer` to tokenize text.
unet ([`UNet2DConditionModel`]):
A `UNet2DConditionModel` to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents.
inverse_scheduler ([`DDIMInverseScheduler`]):
A scheduler to be used in combination with `unet` to fill in the unmasked part of the input latents.
safety_checker ([`StableDiffusionSafetyChecker`]):
Classification module that estimates whether generated images could be considered offensive or harmful.
Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
about a model's potential harms.
feature_extractor ([`~transformers.CLIPImageProcessor`]):
A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
"""
model_cpu_offload_seq = "text_encoder->unet->vae"
_optional_components = ["safety_checker", "feature_extractor", "inverse_scheduler"]
_exclude_from_cpu_offload = ["safety_checker"]
def __init__(
self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
tokenizer: CLIPTokenizer,
unet: UNet2DConditionModel,
scheduler: KarrasDiffusionSchedulers,
safety_checker: StableDiffusionSafetyChecker,
feature_extractor: CLIPImageProcessor,
inverse_scheduler: DDIMInverseScheduler,
requires_safety_checker: bool = True,
):
super().__init__()
if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
deprecation_message = (
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
new_config = dict(scheduler.config)
new_config["steps_offset"] = 1
scheduler._internal_dict = FrozenDict(new_config)
if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
deprecation_message = (
f"The configuration file of this scheduler: {scheduler} has not set the configuration"
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
new_config = dict(scheduler.config)
new_config["skip_prk_steps"] = True
scheduler._internal_dict = FrozenDict(new_config)
if safety_checker is None and requires_safety_checker:
logger.warning(
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
)
if safety_checker is not None and feature_extractor is None:
raise ValueError(
"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
)
is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
version.parse(unet.config._diffusers_version).base_version
) < version.parse("0.9.0.dev0")
is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
deprecation_message = (
"The configuration file of the unet has set the default `sample_size` to smaller than"
" 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the"
" following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
" \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
" configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
" in the config might lead to incorrect results in future versions. If you have downloaded this"
" checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
" the `unet/config.json` file"
)
deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
new_config = dict(unet.config)
new_config["sample_size"] = 64
unet._internal_dict = FrozenDict(new_config)
self.register_modules(
vae=vae,
text_encoder=text_encoder,
tokenizer=tokenizer,
unet=unet,
scheduler=scheduler,
safety_checker=safety_checker,
feature_extractor=feature_extractor,
inverse_scheduler=inverse_scheduler,
)
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
self.register_to_config(requires_safety_checker=requires_safety_checker)
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
def _encode_prompt(
self,
prompt,
device,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt=None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
lora_scale: Optional[float] = None,
**kwargs,
):
deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False)
prompt_embeds_tuple = self.encode_prompt(
prompt=prompt,
device=device,
num_images_per_prompt=num_images_per_prompt,
do_classifier_free_guidance=do_classifier_free_guidance,
negative_prompt=negative_prompt,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
lora_scale=lora_scale,
**kwargs,
)
# concatenate for backwards compatibility
prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])
return prompt_embeds
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt
def encode_prompt(
self,
prompt,
device,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt=None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
lora_scale: Optional[float] = None,
clip_skip: Optional[int] = None,
):
r"""
Encodes the prompt into text encoder hidden states.
Args:
prompt (`str` or `List[str]`, *optional*):
prompt to be encoded
device: (`torch.device`):
torch device
num_images_per_prompt (`int`):
number of images that should be generated per prompt
do_classifier_free_guidance (`bool`):
whether to use classifier free guidance or not
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
lora_scale (`float`, *optional*):
A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
clip_skip (`int`, *optional*):
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
the output of the pre-final layer will be used for computing the prompt embeddings.
"""
# set lora scale so that monkey patched LoRA
# function of text encoder can correctly access it
if lora_scale is not None and isinstance(self, LoraLoaderMixin):
self._lora_scale = lora_scale
# dynamically adjust the LoRA scale
if not USE_PEFT_BACKEND:
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
else:
scale_lora_layers(self.text_encoder, lora_scale)
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
if prompt_embeds is None:
# textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin):
prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
text_inputs = self.tokenizer(
prompt,
padding="max_length",
max_length=self.tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
text_input_ids, untruncated_ids
):
removed_text = self.tokenizer.batch_decode(
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
)
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
)
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
attention_mask = text_inputs.attention_mask.to(device)
else:
attention_mask = None
if clip_skip is None:
prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
prompt_embeds = prompt_embeds[0]
else:
prompt_embeds = self.text_encoder(
text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
)
# Access the `hidden_states` first, that contains a tuple of
# all the hidden states from the encoder layers. Then index into
# the tuple to access the hidden states from the desired layer.
prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
# We also need to apply the final LayerNorm here to not mess with the
# representations. The `last_hidden_states` that we typically use for
# obtaining the final prompt representations passes through the LayerNorm
# layer.
prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
if self.text_encoder is not None:
prompt_embeds_dtype = self.text_encoder.dtype
elif self.unet is not None:
prompt_embeds_dtype = self.unet.dtype
else:
prompt_embeds_dtype = prompt_embeds.dtype
prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
bs_embed, seq_len, _ = prompt_embeds.shape
# duplicate text embeddings for each generation per prompt, using mps friendly method
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance and negative_prompt_embeds is None:
uncond_tokens: List[str]
if negative_prompt is None:
uncond_tokens = [""] * batch_size
elif prompt is not None and type(prompt) is not type(negative_prompt):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
f" {type(prompt)}."
)
elif isinstance(negative_prompt, str):
uncond_tokens = [negative_prompt]
elif batch_size != len(negative_prompt):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`."
)
else:
uncond_tokens = negative_prompt
# textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin):
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
max_length = prompt_embeds.shape[1]
uncond_input = self.tokenizer(
uncond_tokens,
padding="max_length",
max_length=max_length,
truncation=True,
return_tensors="pt",
)
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
attention_mask = uncond_input.attention_mask.to(device)
else:
attention_mask = None
negative_prompt_embeds = self.text_encoder(
uncond_input.input_ids.to(device),
attention_mask=attention_mask,
)
negative_prompt_embeds = negative_prompt_embeds[0]
if do_classifier_free_guidance:
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
seq_len = negative_prompt_embeds.shape[1]
negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder, lora_scale)
return prompt_embeds, negative_prompt_embeds
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
def run_safety_checker(self, image, device, dtype):
if self.safety_checker is None:
has_nsfw_concept = None
else:
if torch.is_tensor(image):
feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
else:
feature_extractor_input = self.image_processor.numpy_to_pil(image)
safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
image, has_nsfw_concept = self.safety_checker(
images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
)
return image, has_nsfw_concept
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
def prepare_extra_step_kwargs(self, generator, eta):
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
# check if the scheduler accepts generator
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
extra_step_kwargs["generator"] = generator
return extra_step_kwargs
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
def decode_latents(self, latents):
deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)
latents = 1 / self.vae.config.scaling_factor * latents
image = self.vae.decode(latents, return_dict=False)[0]
image = (image / 2 + 0.5).clamp(0, 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
return image
def check_inputs(
self,
prompt,
strength,
callback_steps,
negative_prompt=None,
prompt_embeds=None,
negative_prompt_embeds=None,
):
if (strength is None) or (strength is not None and (strength < 0 or strength > 1)):
raise ValueError(
f"The value of `strength` should in [0.0, 1.0] but is, but is {strength} of type {type(strength)}."
)
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
if prompt is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
" only forward one of the two."
)
elif prompt is None and prompt_embeds is None:
raise ValueError(
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
)
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if negative_prompt is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
if prompt_embeds is not None and negative_prompt_embeds is not None:
if prompt_embeds.shape != negative_prompt_embeds.shape:
raise ValueError(
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
f" {negative_prompt_embeds.shape}."
)
def check_source_inputs(
self,
source_prompt=None,
source_negative_prompt=None,
source_prompt_embeds=None,
source_negative_prompt_embeds=None,
):
if source_prompt is not None and source_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `source_prompt`: {source_prompt} and `source_prompt_embeds`: {source_prompt_embeds}."
" Please make sure to only forward one of the two."
)
elif source_prompt is None and source_prompt_embeds is None:
raise ValueError(
"Provide either `source_image` or `source_prompt_embeds`. Cannot leave all both of the arguments undefined."
)
elif source_prompt is not None and (
not isinstance(source_prompt, str) and not isinstance(source_prompt, list)
):
raise ValueError(f"`source_prompt` has to be of type `str` or `list` but is {type(source_prompt)}")
if source_negative_prompt is not None and source_negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `source_negative_prompt`: {source_negative_prompt} and `source_negative_prompt_embeds`:"
f" {source_negative_prompt_embeds}. Please make sure to only forward one of the two."
)
if source_prompt_embeds is not None and source_negative_prompt_embeds is not None:
if source_prompt_embeds.shape != source_negative_prompt_embeds.shape:
raise ValueError(
"`source_prompt_embeds` and `source_negative_prompt_embeds` must have the same shape when passed"
f" directly, but got: `source_prompt_embeds` {source_prompt_embeds.shape} !="
f" `source_negative_prompt_embeds` {source_negative_prompt_embeds.shape}."
)
def get_timesteps(self, num_inference_steps, strength, device):
# get the original timestep using init_timestep
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
t_start = max(num_inference_steps - init_timestep, 0)
timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
return timesteps, num_inference_steps - t_start
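# Mirror of `get_timesteps` for the inversion (noising) schedule: keep only the first
# `num_inference_steps - t_start` timesteps so that inversion runs up to the noise level
# implied by `strength`; when `strength` >= 1 (t_start == 0) the full schedule is used.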
def get_inverse_timesteps(self, num_inference_steps, strength, device):
# get the original timestep using init_timestep
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
t_start = max(num_inference_steps - init_timestep, 0)
# safety for t_start overflow to prevent empty timesteps slice
if t_start == 0:
return self.inverse_scheduler.timesteps, num_inference_steps
timesteps = self.inverse_scheduler.timesteps[:-t_start]
return timesteps, num_inference_steps - t_start
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
if latents is None:
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
else:
latents = latents.to(device)
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
return latents
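# Encodes an image batch into VAE latents (inputs that already have 4 channels are treated
# as latents and passed through), scales them by the VAE `scaling_factor`, and duplicates
# the latents when the prompt batch size is a larger multiple of the image batch size.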
def prepare_image_latents(self, image, batch_size, dtype, device, generator=None):
if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
raise ValueError(
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
)
image = image.to(device=device, dtype=dtype)
if image.shape[1] == 4:
latents = image
else:
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
if isinstance(generator, list):
latents = [
self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
]
latents = torch.cat(latents, dim=0)
else:
latents = self.vae.encode(image).latent_dist.sample(generator)
latents = self.vae.config.scaling_factor * latents
if batch_size != latents.shape[0]:
if batch_size % latents.shape[0] == 0:
# expand image_latents for batch_size
deprecation_message = (
f"You have passed {batch_size} text prompts (`prompt`), but only {latents.shape[0]} initial"
" images (`image`). Initial images are now duplicating to match the number of text prompts. Note"
" that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update"
" your script to pass as many initial images as text prompts to suppress this warning."
)
deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
additional_latents_per_image = batch_size // latents.shape[0]
latents = torch.cat([latents] * additional_latents_per_image, dim=0)
else:
raise ValueError(
f"Cannot duplicate `image` of batch size {latents.shape[0]} to {batch_size} text prompts."
)
else:
latents = torch.cat([latents], dim=0)
return latents
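# Converts the model output into an epsilon (noise) prediction according to the inverse
# scheduler's `prediction_type`, so the regularizers in `invert` always operate on a
# quantity that is expected to be distributed as standard normal noise.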
def get_epsilon(self, model_output: torch.Tensor, sample: torch.Tensor, timestep: int):
pred_type = self.inverse_scheduler.config.prediction_type
alpha_prod_t = self.inverse_scheduler.alphas_cumprod[timestep]
beta_prod_t = 1 - alpha_prod_t
if pred_type == "epsilon":
return model_output
elif pred_type == "sample":
return (sample - alpha_prod_t ** (0.5) * model_output) / beta_prod_t ** (0.5)
elif pred_type == "v_prediction":
return (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
f"prediction_type given as {pred_type} must be one of `epsilon`, `sample`, or `v_prediction`"
)
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def generate_mask(
self,
image: Union[torch.FloatTensor, PIL.Image.Image] = None,
target_prompt: Optional[Union[str, List[str]]] = None,
target_negative_prompt: Optional[Union[str, List[str]]] = None,
target_prompt_embeds: Optional[torch.FloatTensor] = None,
target_negative_prompt_embeds: Optional[torch.FloatTensor] = None,
source_prompt: Optional[Union[str, List[str]]] = None,
source_negative_prompt: Optional[Union[str, List[str]]] = None,
source_prompt_embeds: Optional[torch.FloatTensor] = None,
source_negative_prompt_embeds: Optional[torch.FloatTensor] = None,
num_maps_per_mask: Optional[int] = 10,
mask_encode_strength: Optional[float] = 0.5,
mask_thresholding_ratio: Optional[float] = 3.0,
num_inference_steps: int = 50,
guidance_scale: float = 7.5,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
output_type: Optional[str] = "np",
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
):
r"""
Generate a latent mask given a mask prompt, a target prompt, and an image.
Args:
image (`PIL.Image.Image`):
`Image` or tensor representing an image batch to be used for computing the mask.
target_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide semantic mask generation. If not defined, you need to pass
`prompt_embeds`.
target_negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide what to not include in image generation. If not defined, you need to
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
target_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
provided, text embeddings are generated from the `prompt` input argument.
target_negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
source_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide semantic mask generation using DiffEdit. If not defined, you need to
pass `source_prompt_embeds` or `source_image` instead.
source_negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide semantic mask generation away from using DiffEdit. If not defined, you
need to pass `source_negative_prompt_embeds` or `source_image` instead.
source_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings to guide the semantic mask generation. Can be used to easily tweak text
inputs (prompt weighting). If not provided, text embeddings are generated from `source_prompt` input
argument.
source_negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings to negatively guide the semantic mask generation. Can be used to easily
tweak text inputs (prompt weighting). If not provided, text embeddings are generated from
`source_negative_prompt` input argument.
num_maps_per_mask (`int`, *optional*, defaults to 10):
The number of noise maps sampled to generate the semantic mask using DiffEdit.
mask_encode_strength (`float`, *optional*, defaults to 0.5):
The strength of the noise maps sampled to generate the semantic mask using DiffEdit. Must be between 0
and 1.
mask_thresholding_ratio (`float`, *optional*, defaults to 3.0):
The maximum multiple of the mean absolute difference used to clamp the semantic guidance map before
mask binarization.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
guidance_scale (`float`, *optional*, defaults to 7.5):
A higher guidance scale value encourages the model to generate images closely linked to the text
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
generation deterministic.
output_type (`str`, *optional*, defaults to `"np"`):
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the
[`~models.attention_processor.AttnProcessor`] as defined in
[`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
Examples:
Returns:
`List[PIL.Image.Image]` or `np.array`:
When returning a `List[PIL.Image.Image]`, the list consists of a batch of single-channel binary images
with dimensions `(height // self.vae_scale_factor, width // self.vae_scale_factor)`. If it's
`np.array`, the shape is `(batch_size, height // self.vae_scale_factor, width //
self.vae_scale_factor)`.
"""
# 1. Check inputs (Provide dummy argument for callback_steps)
self.check_inputs(
target_prompt,
mask_encode_strength,
1,
target_negative_prompt,
target_prompt_embeds,
target_negative_prompt_embeds,
)
self.check_source_inputs(
source_prompt,
source_negative_prompt,
source_prompt_embeds,
source_negative_prompt_embeds,
)
if (num_maps_per_mask is None) or (
num_maps_per_mask is not None and (not isinstance(num_maps_per_mask, int) or num_maps_per_mask <= 0)
):
raise ValueError(
f"`num_maps_per_mask` has to be a positive integer but is {num_maps_per_mask} of type"
f" {type(num_maps_per_mask)}."
)
if mask_thresholding_ratio is None or mask_thresholding_ratio <= 0:
raise ValueError(
f"`mask_thresholding_ratio` has to be positive but is {mask_thresholding_ratio} of type"
f" {type(mask_thresholding_ratio)}."
)
# 2. Define call parameters
if target_prompt is not None and isinstance(target_prompt, str):
batch_size = 1
elif target_prompt is not None and isinstance(target_prompt, list):
batch_size = len(target_prompt)
else:
batch_size = target_prompt_embeds.shape[0]
if cross_attention_kwargs is None:
cross_attention_kwargs = {}
device = self._execution_device
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
# 3. Encode input prompts
(cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None)
target_negative_prompt_embeds, target_prompt_embeds = self.encode_prompt(
target_prompt,
device,
num_maps_per_mask,
do_classifier_free_guidance,
target_negative_prompt,
prompt_embeds=target_prompt_embeds,
negative_prompt_embeds=target_negative_prompt_embeds,
)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
if do_classifier_free_guidance:
target_prompt_embeds = torch.cat([target_negative_prompt_embeds, target_prompt_embeds])
source_negative_prompt_embeds, source_prompt_embeds = self.encode_prompt(
source_prompt,
device,
num_maps_per_mask,
do_classifier_free_guidance,
source_negative_prompt,
prompt_embeds=source_prompt_embeds,
negative_prompt_embeds=source_negative_prompt_embeds,
)
if do_classifier_free_guidance:
source_prompt_embeds = torch.cat([source_negative_prompt_embeds, source_prompt_embeds])
# 4. Preprocess image
image = self.image_processor.preprocess(image).repeat_interleave(num_maps_per_mask, dim=0)
# 5. Set timesteps
self.scheduler.set_timesteps(num_inference_steps, device=device)
timesteps, _ = self.get_timesteps(num_inference_steps, mask_encode_strength, device)
encode_timestep = timesteps[0]
# 6. Prepare image latents and add noise with specified strength
image_latents = self.prepare_image_latents(
image, batch_size * num_maps_per_mask, self.vae.dtype, device, generator
)
noise = randn_tensor(image_latents.shape, generator=generator, device=device, dtype=self.vae.dtype)
image_latents = self.scheduler.add_noise(image_latents, noise, encode_timestep)
latent_model_input = torch.cat([image_latents] * (4 if do_classifier_free_guidance else 2))
latent_model_input = self.scheduler.scale_model_input(latent_model_input, encode_timestep)
# 7. Predict the noise residual
prompt_embeds = torch.cat([source_prompt_embeds, target_prompt_embeds])
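# With classifier-free guidance the effective batch is ordered
# [source-uncond, source-cond, target-uncond, target-cond], matching the 4-way chunk below.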
noise_pred = self.unet(
latent_model_input,
encode_timestep,
encoder_hidden_states=prompt_embeds,
cross_attention_kwargs=cross_attention_kwargs,
).sample
if do_classifier_free_guidance:
noise_pred_neg_src, noise_pred_source, noise_pred_uncond, noise_pred_target = noise_pred.chunk(4)
noise_pred_source = noise_pred_neg_src + guidance_scale * (noise_pred_source - noise_pred_neg_src)
noise_pred_target = noise_pred_uncond + guidance_scale * (noise_pred_target - noise_pred_uncond)
else:
noise_pred_source, noise_pred_target = noise_pred.chunk(2)
# 8. Compute the mask from the absolute difference of predicted noise residuals
# TODO: Consider smoothing mask guidance map
mask_guidance_map = (
torch.abs(noise_pred_target - noise_pred_source)
.reshape(batch_size, num_maps_per_mask, *noise_pred_target.shape[-3:])
.mean([1, 2])
)
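# mask_guidance_map has shape (batch_size, latent_height, latent_width): the per-pixel mean
# absolute difference between target and source noise predictions, averaged over the
# `num_maps_per_mask` samples and the latent channels.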
clamp_magnitude = mask_guidance_map.mean() * mask_thresholding_ratio
semantic_mask_image = mask_guidance_map.clamp(0, clamp_magnitude) / clamp_magnitude
semantic_mask_image = torch.where(semantic_mask_image <= 0.5, 0, 1)
mask_image = semantic_mask_image.cpu().numpy()
# 9. Convert to Numpy array or PIL.
if output_type == "pil":
mask_image = self.image_processor.numpy_to_pil(mask_image)
# Offload all models
self.maybe_free_model_hooks()
return mask_image
@torch.no_grad()
@replace_example_docstring(EXAMPLE_INVERT_DOC_STRING)
def invert(
self,
prompt: Optional[Union[str, List[str]]] = None,
image: Union[torch.FloatTensor, PIL.Image.Image] = None,
num_inference_steps: int = 50,
inpaint_strength: float = 0.8,
guidance_scale: float = 7.5,
negative_prompt: Optional[Union[str, List[str]]] = None,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
decode_latents: bool = False,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: Optional[int] = 1,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
lambda_auto_corr: float = 20.0,
lambda_kl: float = 20.0,
num_reg_steps: int = 0,
num_auto_corr_rolls: int = 5,
):
r"""
Generate inverted latents given a prompt and image.
Args:
prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
image (`PIL.Image.Image`):
`Image` or tensor representing an image batch to produce the inverted latents guided by `prompt`.
inpaint_strength (`float`, *optional*, defaults to 0.8):
Indicates extent of the noising process to run latent inversion. Must be between 0 and 1. When
`inpaint_strength` is 1, the inversion process is run for the full number of iterations specified in
`num_inference_steps`. `image` is used as a reference for the inversion process, and a higher
`inpaint_strength` adds more noise. If `inpaint_strength` is 0, no inversion steps are run.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
guidance_scale (`float`, *optional*, defaults to 7.5):
A higher guidance scale value encourages the model to generate images closely linked to the text
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide what to not include in image generation. If not defined, you need to
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
generator (`torch.Generator`, *optional*):
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
generation deterministic.
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
provided, text embeddings are generated from the `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
decode_latents (`bool`, *optional*, defaults to `False`):
Whether or not to decode the inverted latents into a generated image. Setting this argument to `True`
decodes all inverted latents for each timestep into a list of generated images.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.stable_diffusion.DiffEditInversionPipelineOutput`] instead of a
plain tuple.
callback (`Callable`, *optional*):
A function called every `callback_steps` steps during inference. The function is called with the
following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function is called. If not specified, the callback is called at
every step.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the
[`~models.attention_processor.AttnProcessor`] as defined in
[`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
lambda_auto_corr (`float`, *optional*, defaults to 20.0):
Lambda parameter to control auto correction.
lambda_kl (`float`, *optional*, defaults to 20.0):
Lambda parameter to control Kullback-Leibler divergence output.
num_reg_steps (`int`, *optional*, defaults to 0):
Number of regularization loss steps.
num_auto_corr_rolls (`int`, *optional*, defaults to 5):
Number of auto correction roll steps.
Examples:
Returns:
[`~pipelines.stable_diffusion.pipeline_stable_diffusion_diffedit.DiffEditInversionPipelineOutput`] or
`tuple`:
If `return_dict` is `True`,
[`~pipelines.stable_diffusion.pipeline_stable_diffusion_diffedit.DiffEditInversionPipelineOutput`] is
returned, otherwise a `tuple` is returned where the first element is the inverted latents tensors
ordered from the most to the least noised, and the second is the corresponding decoded images if `decode_latents` is
`True`, otherwise `None`.
"""
# 1. Check inputs
self.check_inputs(
prompt,
inpaint_strength,
callback_steps,
negative_prompt,
prompt_embeds,
negative_prompt_embeds,
)
if image is None:
raise ValueError("`image` input cannot be undefined.")
# 2. Define call parameters
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
if cross_attention_kwargs is None:
cross_attention_kwargs = {}
device = self._execution_device
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
# 3. Preprocess image
image = self.image_processor.preprocess(image)
# 4. Prepare latent variables
num_images_per_prompt = 1
latents = self.prepare_image_latents(
image, batch_size * num_images_per_prompt, self.vae.dtype, device, generator
)
# 5. Encode input prompt
prompt_embeds, negative_prompt_embeds = self.encode_prompt(
prompt,
device,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
if do_classifier_free_guidance:
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
# 6. Prepare timesteps
self.inverse_scheduler.set_timesteps(num_inference_steps, device=device)
timesteps, num_inference_steps = self.get_inverse_timesteps(num_inference_steps, inpaint_strength, device)
# 7. Noising loop where we obtain the intermediate noised latent image for each timestep.
num_warmup_steps = len(timesteps) - num_inference_steps * self.inverse_scheduler.order
inverted_latents = []
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
latent_model_input = self.inverse_scheduler.scale_model_input(latent_model_input, t)
# predict the noise residual
noise_pred = self.unet(
latent_model_input,
t,
encoder_hidden_states=prompt_embeds,
cross_attention_kwargs=cross_attention_kwargs,
).sample
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# regularization of the noise prediction (not in original code or paper but borrowed from Pix2PixZero)
if num_reg_steps > 0:
with torch.enable_grad():
for _ in range(num_reg_steps):
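# Each regularization step treats the current noise prediction as a leaf tensor,
# backpropagates the auto-correlation / KL penalties through `get_epsilon`, and then
# takes a plain gradient step directly on `noise_pred`.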
if lambda_auto_corr > 0:
for _ in range(num_auto_corr_rolls):
var = torch.autograd.Variable(noise_pred.detach().clone(), requires_grad=True)
# Derive epsilon from model output before regularizing to IID standard normal
var_epsilon = self.get_epsilon(var, latent_model_input.detach(), t)
l_ac = auto_corr_loss(var_epsilon, generator=generator)
l_ac.backward()
grad = var.grad.detach() / num_auto_corr_rolls
noise_pred = noise_pred - lambda_auto_corr * grad
if lambda_kl > 0:
var = torch.autograd.Variable(noise_pred.detach().clone(), requires_grad=True)
# Derive epsilon from model output before regularizing to IID standard normal
var_epsilon = self.get_epsilon(var, latent_model_input.detach(), t)
l_kld = kl_divergence(var_epsilon)
l_kld.backward()
grad = var.grad.detach()
noise_pred = noise_pred - lambda_kl * grad
noise_pred = noise_pred.detach()
# compute the previous noisy sample x_t -> x_t-1
latents = self.inverse_scheduler.step(noise_pred, t, latents).prev_sample
inverted_latents.append(latents.detach().clone())
# call the callback, if provided
if i == len(timesteps) - 1 or (
(i + 1) > num_warmup_steps and (i + 1) % self.inverse_scheduler.order == 0
):
progress_bar.update()
if callback is not None and i % callback_steps == 0:
step_idx = i // getattr(self.scheduler, "order", 1)
callback(step_idx, t, latents)
assert len(inverted_latents) == len(timesteps)
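# Stack the per-step latents along a new timestep dimension, reversed so that index 0 holds
# the most-noised latent; this matches the denoising order expected by `__call__`.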
latents = torch.stack(list(reversed(inverted_latents)), 1)
# 8. Post-processing
image = None
if decode_latents:
image = self.decode_latents(latents.flatten(0, 1))
# 9. Convert to PIL.
if decode_latents and output_type == "pil":
image = self.image_processor.numpy_to_pil(image)
# Offload all models
self.maybe_free_model_hooks()
if not return_dict:
return (latents, image)
return DiffEditInversionPipelineOutput(latents=latents, images=image)
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
prompt: Optional[Union[str, List[str]]] = None,
mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None,
image_latents: Union[torch.FloatTensor, PIL.Image.Image] = None,
inpaint_strength: Optional[float] = 0.8,
num_inference_steps: int = 50,
guidance_scale: float = 7.5,
negative_prompt: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: int = 1,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
clip_skip: int = None,
):
r"""
The call function to the pipeline for generation.
Args:
prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
mask_image (`PIL.Image.Image`):
`Image` or tensor representing an image batch to mask the generated image. White pixels in the mask are
repainted, while black pixels are preserved. If `mask_image` is a PIL image, it is converted to a
single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
instead of 3, so the expected shape would be `(B, 1, H, W)`.
image_latents (`PIL.Image.Image` or `torch.FloatTensor`):
Partially noised image latents from the inversion process to be used as inputs for image generation.
inpaint_strength (`float`, *optional*, defaults to 0.8):
Indicates extent to inpaint the masked area. Must be between 0 and 1. When `inpaint_strength` is 1, the
denoising process is run on the masked area for the full number of iterations specified in
`num_inference_steps`. `image_latents` is used as a reference for the masked area, and a higher
`inpaint_strength` adds more noise to that region. If `inpaint_strength` is 0, no inpainting occurs.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
guidance_scale (`float`, *optional*, defaults to 7.5):
A higher guidance scale value encourages the model to generate images closely linked to the text
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide what to not include in image generation. If not defined, you need to
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
generator (`torch.Generator`, *optional*):
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
generation deterministic.
latents (`torch.FloatTensor`, *optional*):
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor is generated by sampling using the supplied random `generator`.
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
provided, text embeddings are generated from the `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
plain tuple.
callback (`Callable`, *optional*):
A function called every `callback_steps` steps during inference. The function is called with the
following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function is called. If not specified, the callback is called at
every step.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
[`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
clip_skip (`int`, *optional*):
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
the output of the pre-final layer will be used for computing the prompt embeddings.
Examples:
Returns:
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
otherwise a `tuple` is returned where the first element is a list with the generated images and the
second element is a list of `bool`s indicating whether the corresponding generated image contains
"not-safe-for-work" (nsfw) content.
"""
# 1. Check inputs
self.check_inputs(
prompt,
inpaint_strength,
callback_steps,
negative_prompt,
prompt_embeds,
negative_prompt_embeds,
)
if mask_image is None:
raise ValueError(
"`mask_image` input cannot be undefined. Use `generate_mask()` to compute `mask_image` from text prompts."
)
if image_latents is None:
raise ValueError(
"`image_latents` input cannot be undefined. Use `invert()` to compute `image_latents` from input images."
)
# 2. Define call parameters
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
if cross_attention_kwargs is None:
cross_attention_kwargs = {}
device = self._execution_device
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
# 3. Encode input prompt
text_encoder_lora_scale = (
cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
)
prompt_embeds, negative_prompt_embeds = self.encode_prompt(
prompt,
device,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
lora_scale=text_encoder_lora_scale,
clip_skip=clip_skip,
)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
if do_classifier_free_guidance:
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
# 4. Preprocess mask
mask_image = preprocess_mask(mask_image, batch_size)
latent_height, latent_width = mask_image.shape[-2:]
mask_image = torch.cat([mask_image] * num_images_per_prompt)
mask_image = mask_image.to(device=device, dtype=prompt_embeds.dtype)
# 5. Set timesteps
self.scheduler.set_timesteps(num_inference_steps, device=device)
timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, inpaint_strength, device)
# 6. Preprocess image latents
if isinstance(image_latents, list) and any(isinstance(l, torch.Tensor) and l.ndim == 5 for l in image_latents):
image_latents = torch.cat(image_latents).detach()
elif isinstance(image_latents, torch.Tensor) and image_latents.ndim == 5:
image_latents = image_latents.detach()
else:
image_latents = self.image_processor.preprocess(image_latents).detach()
latent_shape = (self.vae.config.latent_channels, latent_height, latent_width)
if image_latents.shape[-3:] != latent_shape:
raise ValueError(
f"Each latent image in `image_latents` must have shape {latent_shape}, "
f"but has shape {image_latents.shape[-3:]}"
)
if image_latents.ndim == 4:
image_latents = image_latents.reshape(batch_size, len(timesteps), *latent_shape)
if image_latents.shape[:2] != (batch_size, len(timesteps)):
raise ValueError(
f"`image_latents` must have batch size {batch_size} with latent images from {len(timesteps)}"
f" timesteps, but has batch size {image_latents.shape[0]} with latent images from"
f" {image_latents.shape[1]} timesteps."
)
image_latents = image_latents.transpose(0, 1).repeat_interleave(num_images_per_prompt, dim=1)
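# image_latents now has shape (num timesteps, batch_size * num_images_per_prompt, C, H, W);
# image_latents[i] is the inverted latent that corresponds to denoising step i.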
image_latents = image_latents.to(device=device, dtype=prompt_embeds.dtype)
# 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 8. Denoising loop
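# Start from the most-noised inverted latent (index 0 along the timestep dimension).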
latents = image_latents[0].clone()
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
# predict the noise residual
noise_pred = self.unet(
latent_model_input,
t,
encoder_hidden_states=prompt_embeds,
cross_attention_kwargs=cross_attention_kwargs,
).sample
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
# mask with inverted latents from appropriate timestep - use original image latent for last step
latents = latents * mask_image + image_latents[i] * (1 - mask_image)
# call the callback, if provided
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
progress_bar.update()
if callback is not None and i % callback_steps == 0:
step_idx = i // getattr(self.scheduler, "order", 1)
callback(step_idx, t, latents)
if not output_type == "latent":
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
else:
image = latents
has_nsfw_concept = None
if has_nsfw_concept is None:
do_denormalize = [True] * image.shape[0]
else:
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
# Offload all models
self.maybe_free_model_hooks()
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
| diffusers/src/diffusers/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py/0 | {
"file_path": "diffusers/src/diffusers/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py",
"repo_id": "diffusers",
"token_count": 34125
} | 138 |