hf_public_repos/tokenizers/tokenizers/src/models/unigram/mod.rs
//! [Unigram](https://arxiv.org/abs/1804.10959) model.
mod lattice;
mod model;
mod serialization;
mod trainer;
mod trie;

pub use lattice::*;
pub use model::*;
pub use trainer::*;
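For orientation, here is a minimal, hypothetical consumer of the API this module re-exports. It only assumes the crate is used as `tokenizers` with the same module path as the doc-tests below.

```rust
use tokenizers::models::unigram::{Unigram, UnigramTrainer};

fn main() {
    // The default model contains only "<unk>" with unk_id = 0.
    let model = Unigram::default();
    assert!(!model.byte_fallback());

    // The trainer is configured through its builder; its defaults
    // target an 8000-piece vocabulary.
    let _trainer = UnigramTrainer::default();
}
```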
hf_public_repos/tokenizers/tokenizers/src/models/unigram/model.rs
use super::{ lattice::Lattice, trainer::UnigramTrainer, trie::{Trie, TrieBuilder}, }; use crate::tokenizer::{Model, Result, Token}; use crate::utils::cache::Cache; use std::collections::HashMap; use std::convert::TryInto; use std::fs::read_to_string; use std::path::{Path, PathBuf}; type TokenMap = HashMap<String, u32>; type Vocab = Vec<(String, f64)>; /// A `Unigram` model to encode sentences. pub struct Unigram { token_to_ids: TokenMap, pub(crate) vocab: Vocab, cache: Cache<String, Vec<String>>, trie: Trie<u8>, pub min_score: f64, pub(super) unk_id: Option<usize>, pub(super) bos_id: usize, pub(super) eos_id: usize, fuse_unk: bool, is_optimized: bool, byte_fallback: bool, } impl PartialEq for Unigram { fn eq(&self, other: &Self) -> bool { self.unk_id == other.unk_id && self.vocab == other.vocab } } impl Clone for Unigram { // `Clone` can't be derive because it's not implemented for `Cache`. // To keep things simple when we clone, the new Unigram will start with a fresh cache. fn clone(&self) -> Self { let fresh_cache = self.cache.fresh(); Self { vocab: self.vocab.clone(), cache: fresh_cache, token_to_ids: self.token_to_ids.clone(), trie: self.trie.clone(), min_score: self.min_score, unk_id: self.unk_id, bos_id: self.bos_id, eos_id: self.eos_id, fuse_unk: self.fuse_unk, is_optimized: self.is_optimized, byte_fallback: self.byte_fallback, } } } impl std::fmt::Debug for Unigram { fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { fmt.debug_struct("Unigram") .field("vocab", &self.vocab.len()) .field("unk_id", &self.unk_id) .field("byte_fallback", &self.byte_fallback) .finish() } } static K_UNK_PENALTY: f64 = 10.0; #[derive(thiserror::Error, Debug)] pub enum UnigramError { #[error("The vocabulary is empty but at least <unk> is needed")] EmptyVocabulary, #[error("The `unk_id` is larger than vocabulary size")] UnkIdNotInVocabulary, #[error("Encountered an unknown token but `unk_id` is missing")] MissingUnkId, } impl Default for Unigram { fn default() -> Self { let vocab = vec![("<unk>".to_string(), 0.0)]; Self::from(vocab, Some(0), false).unwrap() } } impl Unigram { /// Create a `Unigram` model from a given vocabulary. /// Vocabulary are the various tokens and their associated score which is a sort of a logprob of /// their frequency, which will enable tokenization and sampling. /// unk_id, is the index within the vocabulary. /// For now `Unigram` *requires* at least `unk` because we might find a never seen char. /// Further versions might allow that part to be hidden. 
pub fn from( vocab: Vec<(String, f64)>, unk_id: Option<usize>, byte_fallback: bool, ) -> Result<Self> { let n = vocab.len(); let mut token_to_ids: TokenMap = HashMap::new(); let mut builder = TrieBuilder::default(); if let Some(unk_id) = unk_id { if vocab.is_empty() { return Err(Box::new(UnigramError::EmptyVocabulary)); } if unk_id >= vocab.len() { return Err(Box::new(UnigramError::UnkIdNotInVocabulary)); } } let bos_id = n + 1; let eos_id = n + 2; let mut min_score = f64::INFINITY; for (id, (token, score)) in vocab.iter().enumerate() { token_to_ids.insert(token.to_string(), id as u32); let bytes: Vec<u8> = token.bytes().collect(); builder.push(&bytes); if score < &min_score { min_score = *score; } } let trie = builder.build(); let fuse_unk = true; let is_optimized = true; Ok(Self { vocab, token_to_ids, trie, min_score, bos_id, eos_id, unk_id, fuse_unk, cache: Cache::default(), is_optimized, byte_fallback, }) } #[cfg(test)] pub(super) fn set_fuse_unk(&mut self, fuse_unk: bool) { self.fuse_unk = fuse_unk; self.cache = self.cache.fresh(); } #[cfg(test)] pub(super) fn set_optimized(&mut self, is_optimized: bool) { self.is_optimized = is_optimized; } pub fn byte_fallback(&self) -> bool { self.byte_fallback } pub(super) fn len(&self) -> usize { self.vocab.len() } pub(super) fn populate_nodes(&self, lattice: &mut Lattice) { let unk_score = self.min_score - K_UNK_PENALTY; let len = lattice.len(); let mut begin_pos = 0; while begin_pos < len { let mblen = lattice.sentence[begin_pos..] .chars() .next() .unwrap() .len_utf8(); let mut has_single_node = false; for bytes in self .trie .common_prefix_search(lattice.sentence.bytes().skip(begin_pos)) { let n = bytes.len(); let tok = String::from_utf8(bytes).unwrap(); let id = *self.token_to_ids.get(&tok).unwrap(); let item = &self.vocab[id as usize]; assert_eq!(item.0, tok); let score: f64 = item.1; lattice.insert(begin_pos, n, score, id.try_into().unwrap()); if !has_single_node && n == mblen { has_single_node = true; } } if !has_single_node { if let Some(unk_id) = self.unk_id { lattice.insert(begin_pos, mblen, unk_score, unk_id); } } begin_pos += mblen } } /// This functions take a String, and will encode it in a Vec of Strings, /// of the best tokenization available to the current model. /// ``` /// use tokenizers::models::unigram::Unigram; /// /// let pieces = vec![ /// ("<unk>".to_string(), 0.0), /// ("a".to_string(), 0.0), /// ("b".to_string(), 0.0), /// ("c".to_string(), 0.0), /// ("d".to_string(), 0.0), /// ("cd".to_string(), 1.0), /// ("ab".to_string(), 2.0), /// ("abc".to_string(), 5.0), /// ("abcd".to_string(), 10.0), /// ]; /// let model = Unigram::from(pieces, Some(0), false).unwrap(); /// let result = model.encode("abcdacdxx").unwrap(); /// assert_eq!(result, vec!["abcd", "a", "cd", "xx"]); /// ``` pub fn encode(&self, sentence: &str) -> Result<Vec<String>> { if sentence.is_empty() { return Ok(vec![]); } if let Some(result) = self.cache.get(sentence) { Ok(result.to_vec()) } else { let result = if self.is_optimized { self.encode_optimized(sentence)? } else { self.encode_unoptimized(sentence)? }; self.cache.set(sentence.to_owned(), result.clone()); Ok(result) } } fn encode_optimized(&self, sentence: &str) -> Result<Vec<String>> { // https://github.com/google/sentencepiece/blob/d48247191a6d50e469ed1a4a36e877befffd1851/src/unigram_model.cc#L600 #[derive(Debug, Clone)] struct BestPathNode { /// The vocab id. (maybe UNK) id: usize, /// The total score of the best path ending at this node. 
best_path_score: f64, /// The starting position (in utf-8) of this node. The entire best /// path can be constructed by backtracking along this link. starts_at: Option<usize>, } impl Default for BestPathNode { fn default() -> Self { Self { id: 0, best_path_score: 0.0, starts_at: None, } } } let size = sentence.len(); let unk_score = self.min_score - K_UNK_PENALTY; let mut best_path_ends_at = vec![BestPathNode::default(); size + 1]; let mut starts_at = 0; while starts_at < size { let best_path_score_till_here = best_path_ends_at[starts_at].best_path_score; let mut has_single_node = false; let mblen = sentence[starts_at..].chars().next().unwrap().len_utf8(); for tok_bytes in self .trie .common_prefix_search(sentence.bytes().skip(starts_at)) { let key_pos = starts_at + tok_bytes.len(); let token: String = String::from_utf8(tok_bytes).unwrap(); let target_node = &mut best_path_ends_at[key_pos]; let length = key_pos - starts_at; let id = self.token_to_ids.get(&token).unwrap(); let score = self.vocab.get(*id as usize).unwrap().1; let candidate_best_path_score = score + best_path_score_till_here; if target_node.starts_at.is_none() || candidate_best_path_score > target_node.best_path_score { target_node.best_path_score = candidate_best_path_score; target_node.starts_at = Some(starts_at); target_node.id = *id as usize; } if !has_single_node && length == mblen { has_single_node = true; } } if !has_single_node { let target_node = &mut best_path_ends_at[starts_at + mblen]; let candidate_best_path_score = unk_score + best_path_score_till_here; if target_node.starts_at.is_none() || candidate_best_path_score > target_node.best_path_score { target_node.best_path_score = candidate_best_path_score; target_node.starts_at = Some(starts_at); target_node.id = self.unk_id.ok_or(UnigramError::MissingUnkId)?; } } starts_at += mblen } let mut ends_at = size; let mut results: Vec<String> = vec![]; let mut token = vec![]; while ends_at > 0 { let node = &best_path_ends_at[ends_at]; let starts_at = node.starts_at.unwrap(); if self.fuse_unk && self.unk_id.is_some() && node.id == self.unk_id.ok_or(UnigramError::MissingUnkId)? { token.push( String::from_utf8(sentence[starts_at..ends_at].as_bytes().to_vec()).unwrap(), ); } else { if !token.is_empty() { token.reverse(); results.push(token.concat()); token = vec![]; } results.push( String::from_utf8(sentence[starts_at..ends_at].as_bytes().to_vec()).unwrap(), ); } ends_at = starts_at; } if !token.is_empty() { token.reverse(); results.push(token.concat()); } results.reverse(); Ok(results) } fn encode_unoptimized(&self, sentence: &str) -> Result<Vec<String>> { let mut lattice = Lattice::from(sentence, self.bos_id, self.eos_id); self.populate_nodes(&mut lattice); if self.fuse_unk { let mut results = vec![]; let mut token = String::new(); for node in lattice.viterbi().iter() { let item = lattice.piece(&node.borrow()); if node.borrow().id == self.unk_id.ok_or(UnigramError::MissingUnkId)? { token.push_str(&item); } else { if !token.is_empty() { results.push(token); token = String::new(); } results.push(item.to_string()); } } if !token.is_empty() { results.push(token); } Ok(results) } else { Ok(lattice.tokens()) } } /// Iterate of vocabulary of the model as a pair of `(token, score)`. pub fn iter(&self) -> UnigramIterator { UnigramIterator { model: self, i: 0 } } /// Loads a SentencePiece output model after being trained by tokenizers. /// After that you can use the model with tokenizers library. 
/// ```no_run /// use tokenizers::models::unigram::Unigram; /// use std::path::Path; /// /// let model = Unigram::load("mymodel-unigram.json").unwrap(); /// ``` pub fn load<P: AsRef<Path>>(path: P) -> Result<Unigram> { let string = read_to_string(path)?; Ok(serde_json::from_str(&string)?) } } /// Iterator to iterate of vocabulary of the model, and their relative score. pub struct UnigramIterator<'a> { model: &'a Unigram, i: usize, } impl<'a> Iterator for UnigramIterator<'a> { type Item = &'a (String, f64); fn next(&mut self) -> Option<Self::Item> { let i = self.i; if i < self.model.len() { let r = Some(&self.model.vocab[i]); self.i += 1; r } else { None } } } impl Model for Unigram { type Trainer = UnigramTrainer; fn get_vocab(&self) -> HashMap<String, u32> { self.token_to_ids.clone() } fn get_vocab_size(&self) -> usize { self.vocab.len() } fn tokenize(&self, sentence: &str) -> Result<Vec<Token>> { let str_tokens = self.encode(sentence)?; let mut offset = 0; let mut tokens = Vec::with_capacity(str_tokens.len()); for string in str_tokens { let len = string.len(); let offsets = (offset, offset + len); let id: u32 = match self.token_to_ids.get(&string) { Some(id) => *id, None => { if self.byte_fallback { let byte_tokens: Option<Vec<_>> = string .bytes() .map(|byte| -> Option<Token> { let byte_string = format!("<0x{:02X}>", byte); let id = self.token_to_ids.get(&byte_string); id.map(|id| Token::new(*id, byte_string, (offset, offset + len))) }) .collect(); if let Some(byte_tokens) = byte_tokens { for token in byte_tokens { tokens.push(token); } offset += len; continue; } } self.unk_id.ok_or(UnigramError::MissingUnkId)? as u32 } }; offset += len; tokens.push(Token::new(id, string, offsets)); } Ok(tokens) } fn token_to_id(&self, token: &str) -> Option<u32> { self.token_to_ids.get(token).copied() } fn id_to_token(&self, id: u32) -> Option<String> { self.vocab.get(id as usize).map(|item| item.0.clone()) } fn save(&self, folder: &Path, name: Option<&str>) -> Result<Vec<PathBuf>> { let name = match name { Some(name) => format!("{}-unigram.json", name), None => "unigram.json".to_string(), }; let mut fullpath = PathBuf::new(); fullpath.push(folder); fullpath.push(name); let string = serde_json::to_string_pretty(self)?; std::fs::write(&fullpath, string)?; Ok(vec![fullpath]) } fn get_trainer(&self) -> Self::Trainer { UnigramTrainer::default() } } #[cfg(test)] mod tests { use super::*; #[test] fn test_populate_nodes_unk() { let pieces = vec![("<unk>".to_string(), 0.0)]; let model = Unigram::from(pieces, Some(0), false).unwrap(); let mut lattice = Lattice::from("abc", model.bos_id, model.eos_id); model.populate_nodes(&mut lattice); assert_eq!(lattice.begin_nodes[0].len(), 1); assert_eq!(lattice.begin_nodes[1].len(), 1); assert_eq!(lattice.begin_nodes[2].len(), 1); assert_eq!(lattice.begin_nodes[0][0].borrow().id, 0); assert_eq!(lattice.begin_nodes[1][0].borrow().id, 0); assert_eq!(lattice.begin_nodes[2][0].borrow().id, 0); assert_eq!(lattice.begin_nodes[0][0].borrow().node_id, 2); assert_eq!(lattice.begin_nodes[1][0].borrow().node_id, 3); assert_eq!(lattice.begin_nodes[2][0].borrow().node_id, 4); } #[test] fn test_populate_nodes() { let pieces = vec![ ("<unk>".to_string(), 0.0), ("a".to_string(), 0.1), ("b".to_string(), 0.2), ("ab".to_string(), 0.3), ("bc".to_string(), 0.4), ]; let model = Unigram::from(pieces, Some(0), false).unwrap(); let mut lattice = Lattice::from("abc", model.bos_id, model.eos_id); model.populate_nodes(&mut lattice); assert_eq!(lattice.begin_nodes[0].len(), 2); // a, ab 
assert_eq!(lattice.begin_nodes[1].len(), 2); // b, bc assert_eq!(lattice.begin_nodes[2].len(), 1); // c(unk) // Id is the vocabulary id from Unigram model // node_id is simply the rank of the given node in the lattice. assert_eq!(lattice.begin_nodes[0][0].borrow().id, 1); assert_eq!(lattice.begin_nodes[0][1].borrow().id, 3); assert_eq!(lattice.begin_nodes[1][0].borrow().id, 2); assert_eq!(lattice.begin_nodes[1][1].borrow().id, 4); assert_eq!(lattice.begin_nodes[2][0].borrow().id, 0); assert_eq!(lattice.begin_nodes[0][0].borrow().node_id, 2); assert_eq!(lattice.begin_nodes[0][1].borrow().node_id, 3); assert_eq!(lattice.begin_nodes[1][0].borrow().node_id, 4); assert_eq!(lattice.begin_nodes[1][1].borrow().node_id, 5); assert_eq!(lattice.begin_nodes[2][0].borrow().node_id, 6); } #[test] fn test_encode() { let sentencepieces = vec![ ("<unk>".to_string(), 0.0), ("a".to_string(), 0.0), ("b".to_string(), 0.0), ("c".to_string(), 0.0), ("d".to_string(), 0.0), ("cd".to_string(), 1.0), ("ab".to_string(), 2.0), ("abc".to_string(), 5.0), ("abcd".to_string(), 10.0), ]; let model = Unigram::from(sentencepieces, Some(0), false).unwrap(); let result = model.encode("abcd").unwrap(); assert_eq!(result, vec!["abcd"]); } #[test] fn test_encode2() { let sentencepieces = vec![ ("<unk>".to_string(), 0.0), ("ab".to_string(), 0.0), ("cd".to_string(), -0.1), ("abc".to_string(), -0.2), ("a".to_string(), -0.3), ("b".to_string(), -0.4), ("c".to_string(), -0.5), ("ABC".to_string(), -0.5), ("abcdabcd".to_string(), 20.0), // User defined just max the scores. ("q".to_string(), 20.5), ("r".to_string(), 20.5), ("qr".to_string(), -0.5), ]; let mut model = Unigram::from(sentencepieces, Some(0), false).unwrap(); for is_optimized in &[true, false] { model.set_optimized(*is_optimized); println!("IsOptimized {:?}", is_optimized); assert_eq!(model.encode("abc").unwrap(), vec!["abc"]); assert_eq!(model.encode("AB").unwrap(), vec!["AB"]); model.set_fuse_unk(false); assert_eq!(model.encode("AB").unwrap(), vec!["A", "B"]); model.set_fuse_unk(true); assert_eq!(model.encode("AB").unwrap(), vec!["AB"]); assert_eq!(model.encode("abcd").unwrap(), vec!["ab", "cd"]); assert_eq!(model.encode("abcc").unwrap(), vec!["abc", "c"]); assert_eq!( model.encode("xabcabaabcdd").unwrap(), vec!["x", "abc", "ab", "a", "ab", "cd", "d"] ); model.set_fuse_unk(false); assert_eq!( model.encode("xyz東京").unwrap(), vec!["x", "y", "z", "東", "京"] ); model.set_fuse_unk(true); assert_eq!(model.encode("xyz東京").unwrap(), vec!["xyz東京"]); // User encoded in original version assert_eq!(model.encode("ABC").unwrap(), vec!["ABC"]); assert_eq!(model.encode("abABCcd").unwrap(), vec!["ab", "ABC", "cd"]); assert_eq!( model.encode("ababcdabcdcd").unwrap(), vec!["ab", "abcdabcd", "cd"] ); assert_eq!(model.encode("abqrcd").unwrap(), vec!["ab", "q", "r", "cd"]); } } #[test] fn test_unigram_bytefallback() { // In [97]: processor.encode_as_pieces("⅐⅛⅑ ") // Out[97]: ['▁', '<0xE2>', '<0x85>', '<0x90>', '⅛', '<0xE2>', '<0x85>', '<0x91>', '▁'] let sentencepieces = vec![ ("<unk>".to_string(), 0.0), ("<0xC3>".to_string(), -0.01), ("<0xA9>".to_string(), -0.03), ]; let unigram = Unigram::from(sentencepieces, Some(0), true).unwrap(); let tokens: Vec<Token> = unigram.tokenize("é").unwrap(); assert_eq!( tokens, [ Token { id: 1, value: "<0xC3>".to_string(), offsets: (0, 2) }, Token { id: 2, value: "<0xA9>".to_string(), offsets: (0, 2) } ] ); let tokens = unigram.tokenize("?é").unwrap(); assert_eq!(tokens[0].id, 0); } }
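To make the encoding entry points concrete, here is a small usage sketch; the vocabulary and expected segmentation are taken verbatim from the doc-test of `encode` above.

```rust
use tokenizers::models::unigram::Unigram;

fn main() {
    // Toy vocabulary of (token, score) pairs, with id 0 reserved for "<unk>".
    let pieces = vec![
        ("<unk>".to_string(), 0.0),
        ("a".to_string(), 0.0),
        ("b".to_string(), 0.0),
        ("c".to_string(), 0.0),
        ("d".to_string(), 0.0),
        ("cd".to_string(), 1.0),
        ("ab".to_string(), 2.0),
        ("abc".to_string(), 5.0),
        ("abcd".to_string(), 10.0),
    ];
    let model = Unigram::from(pieces, Some(0), false).unwrap();

    // Viterbi segmentation; the unknown "xx" tail is fused into one piece
    // because `fuse_unk` defaults to true.
    let result = model.encode("abcdacdxx").unwrap();
    assert_eq!(result, vec!["abcd", "a", "cd", "xx"]);

    // Iterate over the vocabulary as (token, score) pairs.
    for (token, score) in model.iter() {
        println!("{token}: {score}");
    }
}
```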
hf_public_repos/tokenizers/tokenizers/src/models/unigram/serialization.rs
use super::model::Unigram; use serde::{ de::{Error, MapAccess, Visitor}, ser::SerializeStruct, Deserialize, Deserializer, Serialize, Serializer, }; impl Serialize for Unigram { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { let mut model = serializer.serialize_struct("Unigram", 3)?; model.serialize_field("type", "Unigram")?; model.serialize_field("unk_id", &self.unk_id)?; model.serialize_field("vocab", &self.vocab)?; model.serialize_field("byte_fallback", &self.byte_fallback())?; model.end() } } impl<'de> Deserialize<'de> for Unigram { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { deserializer.deserialize_struct( "Unigram", &["type", "vocab", "unk_id", "byte_fallback"], UnigramVisitor, ) } } struct UnigramVisitor; impl<'de> Visitor<'de> for UnigramVisitor { type Value = Unigram; fn expecting(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { write!(fmt, "struct Unigram") } fn visit_map<V>(self, mut map: V) -> std::result::Result<Self::Value, V::Error> where V: MapAccess<'de>, { let mut vocab: Option<Vec<(String, f64)>> = None; let mut unk_id: Option<usize> = None; let mut byte_fallback: bool = false; while let Some(key) = map.next_key::<String>()? { match key.as_ref() { "unk_id" => { unk_id = map.next_value()?; } "byte_fallback" => byte_fallback = map.next_value()?, "vocab" => vocab = Some(map.next_value()?), "type" => match map.next_value()? { "Unigram" => {} u => { return Err(serde::de::Error::invalid_value( serde::de::Unexpected::Str(u), &"Unigram", )) } }, _ => (), } } match (vocab, unk_id, byte_fallback) { (Some(vocab), unk_id, byte_fallback) => Ok(Unigram::from(vocab, unk_id, byte_fallback) .map_err(|err| Error::custom(format!("Unable to load vocab {:?}", err)))?), (None, _, _) => Err(Error::custom("Missing vocab")), } } } #[cfg(test)] mod test { use super::*; #[test] fn test_serialization() { let vocab = vec![("<unk>".to_string(), 0.0), ("a".to_string(), -0.5)]; let model = Unigram::from(vocab, Some(0), false).unwrap(); let data = serde_json::to_string(&model).unwrap(); let reconstructed = serde_json::from_str(&data).unwrap(); assert_eq!(model, reconstructed); } #[test] fn test_serialization_unk_id_not_zero() { let vocab = vec![("a".to_string(), -0.5), ("<unk>".to_string(), 0.0)]; let model = Unigram::from(vocab, Some(1), false).unwrap(); let data = serde_json::to_string(&model).unwrap(); let reconstructed = serde_json::from_str(&data).unwrap(); assert_eq!(model, reconstructed); } #[test] fn test_serialization_no_unk_id() { let vocab = vec![("a".to_string(), -0.5)]; let model = Unigram::from(vocab, None, false).unwrap(); let data = serde_json::to_string(&model).unwrap(); let reconstructed = serde_json::from_str(&data).unwrap(); assert_eq!(model, reconstructed); } }
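A round-trip sketch of the serialization above, modeled on the unit tests in this file and assuming `serde_json` is available to the caller:

```rust
use tokenizers::models::unigram::Unigram;

fn main() {
    let vocab = vec![("<unk>".to_string(), 0.0), ("a".to_string(), -0.5)];
    let model = Unigram::from(vocab, Some(0), false).unwrap();

    // The serialized form carries "type", "unk_id", "vocab" and "byte_fallback".
    let json = serde_json::to_string(&model).unwrap();

    // Deserialization goes through `Unigram::from`, so an invalid
    // vocab/unk_id combination is rejected with a custom serde error.
    let reconstructed: Unigram = serde_json::from_str(&json).unwrap();
    assert_eq!(model, reconstructed);
}
```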
hf_public_repos/tokenizers/tokenizers/src/models/unigram/trainer.rs
use crate::models::unigram::{lattice::Lattice, model::Unigram}; use crate::tokenizer::{AddedToken, Result, Trainer}; use crate::utils::parallelism::*; use crate::utils::progress::{ProgressBar, ProgressStyle}; use log::debug; use serde::{Deserialize, Serialize}; use std::cmp::Reverse; use std::collections::{HashMap, HashSet}; use std::convert::TryInto; // A token and a score type SentencePiece = (String, f64); // A full sentence or word + it's count within the dataset type Sentence = (String, u32); fn digamma(mut x: f64) -> f64 { let mut result = 0.0; while x < 7.0 { result -= 1.0 / x; x += 1.0; } x -= 1.0 / 2.0; let xx = 1.0 / x; let xx2 = xx * xx; let xx4 = xx2 * xx2; result += x.ln() + (1.0 / 24.0) * xx2 - 7.0 / 960.0 * xx4 + (31.0 / 8064.0) * xx4 * xx2 - (127.0 / 30720.0) * xx4 * xx4; result } #[derive(thiserror::Error, Debug)] pub enum UnigramTrainerError { #[error("The vocabulary is not large enough to contain all chars")] VocabularyTooSmall, } fn to_log_prob(pieces: &mut [SentencePiece]) { let sum: f64 = pieces.iter().map(|(_, score)| score).sum(); let logsum = sum.ln(); for (_, score) in pieces.iter_mut() { *score = score.ln() - logsum; } } /// A `UnigramTrainer` can train a `Unigram` model from `word_counts`. #[non_exhaustive] #[derive(Builder, Debug, Clone, Serialize, Deserialize)] pub struct UnigramTrainer { #[builder(default = "true")] pub show_progress: bool, #[builder(default = "8000")] pub vocab_size: u32, #[builder(default = "2")] pub n_sub_iterations: u32, #[builder(default = "0.75")] pub shrinking_factor: f64, #[builder(default = "vec![]")] pub special_tokens: Vec<AddedToken>, #[builder(default = "HashSet::new()")] pub initial_alphabet: HashSet<char>, #[builder(default = "None")] pub unk_token: Option<String>, #[builder(default = "16")] pub max_piece_length: usize, #[builder(default = "1_000_000")] seed_size: usize, #[builder(default = "HashMap::new()")] words: HashMap<String, u32>, } impl Default for UnigramTrainer { fn default() -> Self { Self::builder().build().unwrap() } } impl UnigramTrainer { pub fn builder() -> UnigramTrainerBuilder { UnigramTrainerBuilder::default() } /// Setup a progress bar if asked to show progress fn setup_progress(&self) -> Option<ProgressBar> { if self.show_progress { let p = ProgressBar::new(0); p.set_style( ProgressStyle::default_bar() .template("[{elapsed_precise}] {msg:<40!} {wide_bar} {pos:<9!}/{len:>9!}"), ); Some(p) } else { None } } fn is_valid_sentencepiece(&self, char_string: &[char]) -> bool { // Checks string length // Space not in the substring, numbers, hiragana and more should be taken // care of within pre_tokenizers. 
// https://github.com/google/sentencepiece/blob/26be9516cd81d5315ee31c48d2438018e0eab879/src/trainer_interface.cc#L203 let n = char_string.len(); if char_string.is_empty() || n > self.max_piece_length { return false; } true } fn finalize(&self, model: Unigram, required_chars: HashSet<String>) -> Result<Unigram> { let mut min_score_penalty = 0.0; let min_score_penalty_delta = 0.0001; let mut pieces: Vec<(String, f64)> = vec![]; let mut inserted: HashSet<String> = HashSet::new(); // We don't want to include the <UNK> that was used to train inserted.insert("<UNK>".into()); let existing_pieces: HashMap<String, f64> = model.iter().cloned().collect(); for c in required_chars { if let Some(t) = existing_pieces.get(&c) { inserted.insert(c.clone()); pieces.push((c, *t)); } else { let score = model.min_score + min_score_penalty; inserted.insert(c.clone()); pieces.push((c, score)); min_score_penalty += min_score_penalty_delta; } } let (unk_id, need_add_unk) = if let Some(ref unk) = self.unk_token { let unk_id = self.special_tokens.iter().enumerate().find_map(|(i, t)| { if t.content == *unk { Some(i) } else { None } }); match unk_id { Some(id) => (Some(id), false), None => (Some(0), true), } } else { (None, false) }; let vocab_size_without_special_tokens = if need_add_unk { self.vocab_size as usize - self.special_tokens.len() - 1 } else { self.vocab_size as usize - self.special_tokens.len() }; for (token, score) in model.iter() { if inserted.contains::<str>(token) { continue; } inserted.insert(token.to_string()); pieces.push((token.to_string(), if score.is_nan() { 0.0 } else { *score })); if pieces.len() == vocab_size_without_special_tokens { break; } } pieces.sort_by(|(_, a), (_, b)| b.partial_cmp(a).unwrap()); // Insert the necessary tokens let mut special_tokens = self .special_tokens .iter() .map(|t| (t.content.clone(), 0.0)) .collect::<Vec<_>>(); if need_add_unk { special_tokens.insert(0, (self.unk_token.clone().unwrap(), 0.0)); } Unigram::from( special_tokens.into_iter().chain(pieces).collect(), unk_id, model.byte_fallback(), ) } fn required_chars(&self, word_counts: &[Sentence]) -> HashSet<String> { word_counts .iter() .flat_map(|(s, _count)| s.chars()) .chain(self.initial_alphabet.iter().copied()) .map(|c| c.to_string()) .collect() } fn make_seed_sentence_pieces( &self, sentences: &[Sentence], _progress: &Option<ProgressBar>, ) -> Vec<SentencePiece> { // Put all sentences in a string, separated by \0 let total: usize = sentences .iter() .map(|(s, _)| s.chars().count()) .sum::<usize>() + sentences.len(); let mut flat_string = String::with_capacity(total); let mut all_chars: HashMap<char, u32> = HashMap::new(); let c_sentence_boundary = '\0'; let k_sentence_boundary = '\0'.to_string(); for (string, n) in sentences { if string.is_empty() { continue; } flat_string.push_str(string); // XXX // Comment suggests we add sentence boundary, but it seems to be missing from actual // code in spm. flat_string.push_str(&k_sentence_boundary); for c in string.chars() { if c != c_sentence_boundary { *all_chars.entry(c).or_insert(0) += n; } } } flat_string.shrink_to_fit(); #[cfg(feature = "esaxx_fast")] let suffix = esaxx_rs::suffix(&flat_string).unwrap(); #[cfg(not(feature = "esaxx_fast"))] let suffix = esaxx_rs::suffix_rs(&flat_string).unwrap(); // Basic chars need to be in sentence pieces. 
let mut seed_sentencepieces: Vec<SentencePiece> = vec![]; let mut sall_chars: Vec<_> = all_chars.into_iter().map(|(a, b)| (b, a)).collect(); // Reversed order sall_chars.sort_by_key(|&a| Reverse(a)); let mut substr_index: Vec<_> = suffix .iter() .filter_map(|(string, freq)| { if string.len() <= 1 { return None; } if string.contains(&c_sentence_boundary) { return None; } if !self.is_valid_sentencepiece(string) { return None; } let score = freq * string.len() as u32; // if let Some(p) = &progress { // p.inc(1); // } Some((score, string)) }) .collect(); // Fill seed_sentencepieces for (count, character) in sall_chars { seed_sentencepieces.push((character.to_string(), count.into())); } // sort by decreasing score substr_index.sort_by_key(|&a| Reverse(a)); for (score, char_string) in substr_index { // Just in case assert!(self.is_valid_sentencepiece(char_string)); let string: String = char_string.iter().collect(); seed_sentencepieces.push((string, score.into())); if seed_sentencepieces.len() >= self.seed_size { break; } } to_log_prob(&mut seed_sentencepieces); seed_sentencepieces } fn prune_sentence_pieces( &self, model: &Unigram, pieces: &[SentencePiece], sentences: &[Sentence], ) -> Vec<SentencePiece> { let mut always_keep = vec![true; pieces.len()]; let mut alternatives: Vec<Vec<usize>> = vec![Vec::new(); pieces.len()]; let bos_id = pieces.len() + 1; let eos_id = pieces.len() + 2; // First, segments the current sentencepieces to know // how each sentencepiece is resegmented if this sentencepiece is removed // from the vocabulary. // To do so, we take the second best segmentation of sentencepiece[i]. // alternatives[i] stores the sequence of second best sentencepieces. for (id, (token, _score)) in pieces.iter().enumerate() { // Always keep unk. if id == 0 { always_keep[id] = false; continue; } let mut lattice = Lattice::from(token, bos_id, eos_id); model.populate_nodes(&mut lattice); let nbests = lattice.nbest(2); if nbests.len() == 1 { always_keep[id] = true; } else if nbests[0].len() >= 2 { always_keep[id] = false; } else if nbests[0].len() == 1 { always_keep[id] = true; for node in &nbests[1] { let alt_id = node.borrow().id; alternatives[id].push(alt_id); } } } // Second, segments all sentences to compute likelihood // with a unigram language model. inverted[i] stores // the set of sentence index where the sentencepieces[i] appears. 
let chunk_size = std::cmp::max(sentences.len() / current_num_threads(), 1); let indexed_sentences: Vec<(usize, &Sentence)> = sentences.iter().enumerate().collect(); let collected: (f64, Vec<f64>, Vec<Vec<usize>>) = indexed_sentences .maybe_par_chunks(chunk_size) .map(|enumerated_sentence_count_chunk| { let mut vsum = 0.0; let mut freq: Vec<f64> = vec![0.0; pieces.len()]; let mut inverted: Vec<Vec<usize>> = vec![Vec::new(); pieces.len()]; for (i, (sentence, count)) in enumerated_sentence_count_chunk { let mut lattice = Lattice::from(sentence, bos_id, eos_id); model.populate_nodes(&mut lattice); vsum += *count as f64; for node_ref in lattice.viterbi() { let id = node_ref.borrow().id; freq[id] += *count as f64; inverted[id].push(*i); } } (vsum, freq, inverted) }) .reduce( || (0.0, vec![0.0; pieces.len()], vec![Vec::new(); pieces.len()]), |(vsum, freq, inverted), (lvsum, lfreq, linverted)| { ( vsum + lvsum, freq.iter() .zip(lfreq) .map(|(global_el, local_el)| global_el + local_el) .collect(), inverted .iter() .zip(linverted) .map(|(global_el, local_el)| [&global_el[..], &local_el[..]].concat()) .collect(), ) }, ); let (vsum, freq, inverted) = collected; let sum: f64 = freq.iter().sum(); let logsum = sum.ln(); let mut candidates: Vec<(usize, f64)> = vec![]; let mut new_pieces: Vec<SentencePiece> = Vec::with_capacity(self.vocab_size as usize); new_pieces.push(pieces[0].clone()); // Finally, computes how likely the LM likelihood is reduced if // the sentencepiece[i] is removed from the vocabulary. // Since the exact computation of loss is difficult, we compute the // loss approximately by assuming that all sentencepiece[i] in the sentences // are replaced with alternatives[i] when sentencepiece[i] is removed. for (id, (token, score)) in pieces.iter().enumerate() { if id == 0 { continue; } if freq[id] == 0.0 && !always_keep[id] { // not found in Viterbi path. Can remove this entry safely. continue; } else if alternatives[id].is_empty() { // no alternatives. Keeps this entry. new_pieces.push((token.to_string(), *score)); } else { let mut f = 0.0; // the frequency of pieces[i]; for n in &inverted[id] { let score = sentences[*n].1 as f64; f += score; } // TODO: Temporary hack to avoid Nans. if f == 0.0 || f.is_nan() { // new_pieces.push((token.to_string(), *score)); continue; } f /= vsum; // normalizes by all sentence frequency. let logprob_sp = freq[id].ln() - logsum; // After removing the sentencepiece[i], its frequency freq[i] is // re-assigned to alternatives. // new_sum = current_sum - freq[i] + freq[i] * alternatives.size() // = current_sum + freq[i] (alternatives - 1) let logsum_alt = (sum + freq[id] * (alternatives.len() - 1) as f64).ln(); // The frequencies of altenatives are increased by freq[i]. let mut logprob_alt = 0.0; for n in &alternatives[id] { logprob_alt += (freq[*n] + freq[id]).ln() - logsum_alt; } // loss: the diff of likelihood after removing the sentencepieces[i]. 
let loss = f * (logprob_sp - logprob_alt); if loss.is_nan() { panic!(""); } candidates.push((id, loss)); } } let desired_vocab_size: usize = (self.vocab_size as usize * 11) / 10; // * 1.1 let pruned_size: usize = ((pieces.len() as f64) * self.shrinking_factor) as usize; let pruned_size = desired_vocab_size.max(pruned_size); candidates.sort_by(|(_, a), (_, b)| b.partial_cmp(a).unwrap()); for (id, _score) in candidates { if new_pieces.len() == pruned_size { break; } new_pieces.push(pieces[id].clone()); } new_pieces.to_vec() } /// Update the progress bar with the new provided length and message fn update_progress(&self, p: &Option<ProgressBar>, len: usize, message: &str) { if let Some(p) = p { p.set_message(message); p.set_length(len as u64); p.set_draw_delta(len as u64 / 100); p.reset(); } } /// Set the progress bar in the finish state fn finalize_progress(&self, p: &Option<ProgressBar>, final_len: usize) { if let Some(p) = p { p.set_length(final_len as u64); p.finish(); println!(); } } fn run_e_step(&self, model: &Unigram, sentences: &[Sentence]) -> (f64, u32, Vec<f64>) { let all_sentence_freq: u32 = sentences.iter().map(|(_a, b)| *b).sum(); let chunk_size = std::cmp::max(sentences.len() / current_num_threads(), 1); let collected: (f64, u32, Vec<f64>) = sentences .maybe_par_chunks(chunk_size) .map(|sentences_chunk| { let mut expected: Vec<f64> = vec![0.0; model.len()]; let mut objs: f64 = 0.0; let mut ntokens: u32 = 0; for (string, freq) in sentences_chunk { let mut lattice = Lattice::from(string, model.bos_id, model.eos_id); model.populate_nodes(&mut lattice); let z: f64 = lattice.populate_marginal(*freq as f64, &mut expected); if z.is_nan() { panic!("likelihood is NAN. Input sentence may be too long."); } ntokens += lattice.viterbi().len() as u32; objs -= z / (all_sentence_freq as f64); } (objs, ntokens, expected) }) .reduce( || (0.0, 0, vec![0.0; model.len()]), |(objs, ntokens, expected), (lobjs, lntokens, lexpected)| { ( objs + lobjs, ntokens + lntokens, expected .iter() .zip(lexpected) .map(|(global_el, local_el)| global_el + local_el) .collect(), ) }, ); collected } fn run_m_step(&self, pieces: &[SentencePiece], expected: &[f64]) -> Vec<SentencePiece> { if pieces.len() != expected.len() { panic!( "Those two iterators are supposed to be the same length ({} vs {})", pieces.len(), expected.len() ); } let mut new_pieces: Vec<SentencePiece> = Vec::with_capacity(self.vocab_size.try_into().unwrap()); let mut sum = 0.0; let expected_frequency_threshold = 0.5; for (i, (freq, (piece, _score))) in expected.iter().zip(pieces).enumerate() { // Always keep unk. if i == 0 { new_pieces.push((piece.clone(), f64::NAN)); continue; } if *freq < expected_frequency_threshold { continue; } new_pieces.push((piece.clone(), *freq)); sum += freq; } // // Here we do not use the original EM, but use the // // Bayesianified/DPified EM algorithm. // // https://cs.stanford.edu/~pliang/papers/tutorial-acl2007-talk.pdf // // This modification will act as a sparse prior. let logsum = digamma(sum); let new_pieces: Vec<_> = new_pieces .into_iter() .map(|(s, c)| (s, digamma(c) - logsum)) .collect(); new_pieces } pub fn do_train( &self, sentences: Vec<Sentence>, model: &mut Unigram, ) -> Result<Vec<AddedToken>> { let progress = self.setup_progress(); // // 1. 
Compute frequent substrings // TODO Should be able to upgrade to u64 when needed self.update_progress(&progress, sentences.len(), "Suffix array seeds"); let mut pieces: Vec<SentencePiece> = Vec::with_capacity(self.vocab_size.try_into().unwrap()); // We use a UNK token when training, whatever the `self.unk_token` pieces.push(("<UNK>".into(), f64::NAN)); pieces.extend(self.make_seed_sentence_pieces(&sentences, &progress)); self.finalize_progress(&progress, sentences.len()); // Useful to check compatibility with spm. debug!( "Using {} pieces on {} sentences for EM training", pieces.len(), sentences.len() ); let desired_vocab_size: usize = (self.vocab_size as usize * 11) / 10; // * 1.1 // 2. Run E-M Loops to fine grain the pieces. // We will shrink the vocab by shrinking_factor every loop on average // Some other pieces are dropped if logprob is too small // V = N * (f)**k // k = log(V / N) / log(f) let expected_loops = (((desired_vocab_size as f64).ln() - (pieces.len() as f64).ln()) / self.shrinking_factor.ln()) as usize + 1; let expected_updates = expected_loops * self.n_sub_iterations as usize; self.update_progress(&progress, expected_updates, "EM training"); let required_chars = self.required_chars(&sentences); if required_chars.len() as u32 > self.vocab_size { return Err(Box::new(UnigramTrainerError::VocabularyTooSmall)); } let mut new_model = Unigram::from(pieces.clone(), Some(0), false)?; loop { // Sub-EM iteration. for _iter in 0..self.n_sub_iterations { // Executes E step let (_objective, _num_tokens, expected) = self.run_e_step(&new_model, &sentences); // Executes M step. pieces = self.run_m_step(&pieces, &expected); new_model = Unigram::from(pieces.clone(), Some(0), false)?; // Useful comment for checking compatibility with spm debug!( "Em iter={} size={} obj={} num_tokens={} num_tokens/piece={}", _iter, new_model.len(), _objective, _num_tokens, _num_tokens as f64 / model.len() as f64 ); if let Some(p) = &progress { p.inc(1); } } // end of Sub EM iteration // Stops the iteration when the size of sentences reaches to the // desired symbol size. if pieces.len() <= desired_vocab_size { break; } // Prunes pieces. pieces = self.prune_sentence_pieces(&new_model, &pieces, &sentences); new_model = Unigram::from(pieces.clone(), Some(0), false)?; } self.finalize_progress(&progress, expected_updates); // Finally, adjusts the size of sentencepices to be |vocab_size|. *model = self.finalize(new_model, required_chars)?; Ok(self.special_tokens.clone()) } } impl Trainer for UnigramTrainer { type Model = Unigram; /// Train a Unigram model fn train(&self, model: &mut Unigram) -> Result<Vec<AddedToken>> { let sentences: Vec<_> = self.words.iter().map(|(s, i)| (s.to_owned(), *i)).collect(); self.do_train(sentences, model) } /// Whether we should show progress fn should_show_progress(&self) -> bool { self.show_progress } fn feed<I, S, F>(&mut self, iterator: I, process: F) -> Result<()> where I: Iterator<Item = S> + Send, S: AsRef<str> + Send, F: Fn(&str) -> Result<Vec<String>> + Sync, { let words: Result<HashMap<String, u32>> = iterator .maybe_par_bridge() .map(|sequence| { let words = process(sequence.as_ref())?; let mut map = HashMap::new(); for word in words { map.entry(word).and_modify(|c| *c += 1).or_insert(1); } Ok(map) }) .reduce( || Ok(HashMap::new()), |acc, ws| { let mut acc = acc?; for (k, v) in ws? 
{ acc.entry(k).and_modify(|c| *c += v).or_insert(v); } Ok(acc) }, ); self.words = words?; Ok(()) } } #[cfg(test)] mod tests { use super::*; use assert_approx_eq::assert_approx_eq; use std::iter::FromIterator; #[test] fn test_unigram_chars() { let trainer = UnigramTrainerBuilder::default() .show_progress(false) .build() .unwrap(); let sentences = vec![ ("This is a".to_string(), 1), ("こんにちは友達".to_string(), 1), ]; let required_chars = trainer.required_chars(&sentences); assert_eq!(required_chars.len(), 13); let progress = None; let table = trainer.make_seed_sentence_pieces(&sentences, &progress); let target_strings = vec![ "s", "i", " ", "達", "友", "ん", "は", "に", "ち", "こ", "h", "a", "T", "is ", "s ", ]; let strings: Vec<_> = table.iter().map(|(string, _)| string).collect(); assert_eq!(strings, target_strings); let scores = table.iter().map(|(_, score)| score); let target_scores = vec![ -2.5649493574615367, // 2.0 -2.5649493574615367, // 2.0 -2.5649493574615367, // 2.0 -3.258096538021482, // 1.0 -3.258096538021482, // 1.0 -3.258096538021482, // 1.0 -3.258096538021482, // 1.0 -3.258096538021482, // 1.0 -3.258096538021482, // 1.0 -3.258096538021482, // 1.0 -3.258096538021482, // 1.0 -3.258096538021482, // 1.0 -3.258096538021482, // 1.0 -1.4663370687934272, // 6.0 -1.8718021769015916, // 4.0 ]; for (score, target_score) in scores.zip(target_scores) { assert_approx_eq!(*score, target_score, 0.01); } } #[test] fn test_initial_alphabet() { let trainer = UnigramTrainerBuilder::default() .show_progress(false) .initial_alphabet(HashSet::from_iter(vec!['a', 'b', 'c', 'd', 'e', 'f'])) .build() .unwrap(); let sentences = vec![("こんにちは友達".to_string(), 1)]; let required_chars = trainer.required_chars(&sentences); assert_eq!( required_chars, vec!["こ", "ん", "に", "ち", "は", "友", "達", "a", "b", "c", "d", "e", "f"] .into_iter() .map(|s| s.to_owned()) .collect::<HashSet<_>>() ); } #[test] fn test_unk_token() { // 1. Should add `unk_token` as first special token let trainer = UnigramTrainerBuilder::default() .show_progress(false) .special_tokens(vec![ AddedToken::from("[SEP]", true), AddedToken::from("[CLS]", true), ]) .unk_token(Some("[UNK]".into())) .build() .unwrap(); let mut unigram = Unigram::default(); trainer .do_train(vec![("The".into(), 12), ("are".into(), 11)], &mut unigram) .unwrap(); let mut pieces = unigram.iter(); assert_eq!(pieces.next(), Some(&("[UNK]".into(), 0.0))); assert_eq!(pieces.next(), Some(&("[SEP]".into(), 0.0))); assert_eq!(pieces.next(), Some(&("[CLS]".into(), 0.0))); // 2. Let it where it is let trainer = UnigramTrainerBuilder::default() .show_progress(false) .special_tokens(vec![ AddedToken::from("[SEP]", true), AddedToken::from("[CLS]", true), AddedToken::from("[UNK]", true), ]) .unk_token(Some("[UNK]".into())) .build() .unwrap(); let mut unigram = Unigram::default(); trainer .do_train(vec![("The".into(), 12), ("are".into(), 11)], &mut unigram) .unwrap(); let mut pieces = unigram.iter(); assert_eq!(pieces.next(), Some(&("[SEP]".into(), 0.0))); assert_eq!(pieces.next(), Some(&("[CLS]".into(), 0.0))); assert_eq!(pieces.next(), Some(&("[UNK]".into(), 0.0))); // 3. 
Don't put it there if not needed let trainer = UnigramTrainerBuilder::default() .show_progress(false) .build() .unwrap(); let mut unigram = Unigram::default(); trainer .do_train(vec![("The".into(), 12), ("are".into(), 11)], &mut unigram) .unwrap(); let mut pieces = unigram.iter(); assert_eq!(pieces.next().unwrap().0, "e".to_string()); } #[test] fn test_special_tokens() { let trainer = UnigramTrainerBuilder::default() .show_progress(false) .special_tokens(vec![ AddedToken::from("[SEP]", true), AddedToken::from("[CLS]", true), ]) .build() .unwrap(); let mut unigram = Unigram::default(); trainer .do_train(vec![("The".into(), 12), ("are".into(), 11)], &mut unigram) .unwrap(); let mut pieces = unigram.iter(); assert_eq!(pieces.next(), Some(&("[SEP]".into(), 0.0))); assert_eq!(pieces.next(), Some(&("[CLS]".into(), 0.0))); } #[test] fn test_to_log_prob() { let mut a = vec![("".to_string(), 1.0), ("".to_string(), 2.0)]; to_log_prob(&mut a); let scores = a.iter().map(|(_, score)| *score).collect::<Vec<_>>(); // ln(1) - ln(3) assert_approx_eq!(scores[0], -1.098, 0.01); // ln(2) - ln(3) assert_approx_eq!(scores[1], -0.405, 0.01); } }
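A condensed end-to-end training sketch, assembled from the builder options and `do_train` calls exercised in the tests above, and assuming the usual crate-root re-export `tokenizers::AddedToken`:

```rust
use tokenizers::models::unigram::{Unigram, UnigramTrainer};
use tokenizers::AddedToken;

fn main() {
    // No progress bar, an explicit unk token, and two special tokens that
    // will be inserted at the top of the vocabulary.
    let trainer = UnigramTrainer::builder()
        .show_progress(false)
        .unk_token(Some("[UNK]".into()))
        .special_tokens(vec![
            AddedToken::from("[SEP]", true),
            AddedToken::from("[CLS]", true),
        ])
        .build()
        .unwrap();

    // `do_train` consumes (word, count) pairs and overwrites the model in place.
    let mut model = Unigram::default();
    trainer
        .do_train(vec![("The".into(), 12), ("are".into(), 11)], &mut model)
        .unwrap();

    // "[UNK]" comes first because it was not already listed in `special_tokens`.
    let first = model.iter().next().unwrap();
    assert_eq!(first.0, "[UNK]");
}
```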
hf_public_repos/tokenizers/tokenizers/src/models/unigram/trie.rs
use std::collections::HashMap;
use std::hash::Hash;

#[derive(Default)]
pub struct TrieBuilder<Label> {
    trie: Trie<Label>,
}

impl<Label: Eq + Hash + Copy> TrieBuilder<Label> {
    pub fn push(&mut self, element: &[Label]) {
        self.trie.push(element);
    }

    pub fn build(self) -> Trie<Label> {
        self.trie
    }
}

#[derive(Clone)]
pub struct Trie<Label> {
    root: Node<Label>,
}

impl<Label: Eq + Hash + Copy> Trie<Label> {
    pub fn push(&mut self, element: &[Label]) {
        let mut node = &mut self.root;
        for label in element.iter() {
            node = node.children.entry(*label).or_insert_with(Node::default);
        }
        node.is_leaf = true;
    }

    pub fn common_prefix_search<T>(&self, iterator: T) -> TrieIterator<Label, T>
    where
        T: Iterator<Item = Label>,
    {
        TrieIterator {
            node: &self.root,
            prefix: vec![],
            iterator,
        }
    }
}

pub struct TrieIterator<'a, Label, T> {
    node: &'a Node<Label>,
    prefix: Vec<Label>,
    iterator: T,
}

impl<Label, T> Iterator for TrieIterator<'_, Label, T>
where
    Label: Eq + Hash + Copy,
    T: Iterator<Item = Label>,
{
    type Item = Vec<Label>;

    fn next(&mut self) -> Option<Self::Item> {
        loop {
            let label = self.iterator.next()?;
            self.prefix.push(label);
            let child = self.node.children.get(&label)?;
            self.node = child;
            if self.node.is_leaf {
                return Some(self.prefix.clone());
            }
        }
    }
}

impl<Label> Default for Trie<Label> {
    fn default() -> Self {
        Self {
            root: Node::default(),
        }
    }
}

#[derive(Clone)]
pub struct Node<Label> {
    is_leaf: bool,
    children: HashMap<Label, Node<Label>>,
}

impl<Label> Default for Node<Label> {
    fn default() -> Self {
        Self {
            is_leaf: false,
            children: HashMap::new(),
        }
    }
}
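Because the `trie` module is private to the crate (it is not re-exported by `unigram/mod.rs`), the following sketch is written as a hypothetical test module inside this file. It mirrors how `Unigram` feeds UTF-8 bytes into the builder and then queries common prefixes.

```rust
#[cfg(test)]
mod prefix_search_demo {
    use super::TrieBuilder;

    #[test]
    fn finds_all_vocab_prefixes() {
        let mut builder = TrieBuilder::default();
        for token in ["a", "ab", "abc", "b"] {
            // `Unigram` stores tokens as their UTF-8 bytes.
            builder.push(token.as_bytes());
        }
        let trie = builder.build();

        // Every vocabulary entry that is a prefix of "abcd", in increasing length.
        let matches: Vec<Vec<u8>> = trie.common_prefix_search("abcd".bytes()).collect();
        assert_eq!(matches, vec![b"a".to_vec(), b"ab".to_vec(), b"abc".to_vec()]);
    }
}
```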
hf_public_repos/tokenizers/tokenizers/src/tokenizer/pattern.rs
use crate::utils::SysRegex; use crate::{Offsets, Result}; use regex::Regex; /// Pattern used to split a NormalizedString pub trait Pattern { /// Slice the given string in a list of pattern match positions, with /// a boolean indicating whether this is a match or not. /// /// This method *must* cover the whole string in its outputs, with /// contiguous ordered slices. fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>>; } impl Pattern for char { fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> { let is_char = |c: char| -> bool { c == *self }; is_char.find_matches(inside) } } impl Pattern for &str { fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> { if self.is_empty() { // If we try to find the matches with an empty string, just don't match anything return Ok(vec![((0, inside.chars().count()), false)]); } let re = Regex::new(&regex::escape(self))?; (&re).find_matches(inside) } } impl Pattern for &String { fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> { let s: &str = self; s.find_matches(inside) } } impl Pattern for &Regex { fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> { if inside.is_empty() { return Ok(vec![((0, 0), false)]); } let mut prev = 0; let mut splits = Vec::with_capacity(inside.len()); for m in self.find_iter(inside) { if prev != m.start() { splits.push(((prev, m.start()), false)); } splits.push(((m.start(), m.end()), true)); prev = m.end(); } if prev != inside.len() { splits.push(((prev, inside.len()), false)) } Ok(splits) } } impl Pattern for &SysRegex { fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> { if inside.is_empty() { return Ok(vec![((0, 0), false)]); } let mut prev = 0; let mut splits = Vec::with_capacity(inside.len()); for (start, end) in self.find_iter(inside) { if prev != start { splits.push(((prev, start), false)); } splits.push(((start, end), true)); prev = end; } if prev != inside.len() { splits.push(((prev, inside.len()), false)) } Ok(splits) } } impl<F> Pattern for F where F: Fn(char) -> bool, { fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> { if inside.is_empty() { return Ok(vec![((0, 0), false)]); } let mut last_offset = 0; let mut last_seen = 0; let mut matches = inside .char_indices() .flat_map(|(b, c)| { last_seen = b + c.len_utf8(); if self(c) { let mut events = Vec::with_capacity(2); if last_offset < b { // We need to emit what was before this match events.push(((last_offset, b), false)); } events.push(((b, b + c.len_utf8()), true)); last_offset = b + c.len_utf8(); events } else { vec![] } }) .collect::<Vec<_>>(); // Do not forget the last potential split if last_seen > last_offset { matches.push(((last_offset, last_seen), false)); } Ok(matches) } } /// Invert the `is_match` flags for the wrapped Pattern. This is usefull /// for example when we use a regex that matches words instead of a delimiter, /// and we want to match the delimiter. pub struct Invert<P: Pattern>(pub P); impl<P: Pattern> Pattern for Invert<P> { fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> { Ok(self .0 .find_matches(inside)? .into_iter() .map(|(offsets, flag)| (offsets, !flag)) .collect()) } } #[cfg(test)] mod tests { use super::*; use regex::Regex; macro_rules! 
do_test { ($inside: expr, $pattern: expr => @ERROR) => { assert!($pattern.find_matches($inside).is_err()); }; ($inside: expr, $pattern: expr => $result: expr) => { assert_eq!($pattern.find_matches($inside).unwrap(), $result); assert_eq!( Invert($pattern).find_matches($inside).unwrap(), $result .into_iter() .map(|v: (Offsets, bool)| (v.0, !v.1)) .collect::<Vec<_>>() ); }; } #[test] fn char() { do_test!("aba", 'a' => vec![((0, 1), true), ((1, 2), false), ((2, 3), true)]); do_test!("bbbba", 'a' => vec![((0, 4), false), ((4, 5), true)]); do_test!("aabbb", 'a' => vec![((0, 1), true), ((1, 2), true), ((2, 5), false)]); do_test!("", 'a' => vec![((0, 0), false)]); do_test!("aaa", 'b' => vec![((0, 3), false)]); } #[test] fn str() { do_test!("aba", "a" => vec![((0, 1), true), ((1, 2), false), ((2, 3), true)]); do_test!("bbbba", "a" => vec![((0, 4), false), ((4, 5), true)]); do_test!("aabbb", "a" => vec![((0, 1), true), ((1, 2), true), ((2, 5), false)]); do_test!("aabbb", "ab" => vec![((0, 1), false), ((1, 3), true), ((3, 5), false)]); do_test!("aabbab", "ab" => vec![((0, 1), false), ((1, 3), true), ((3, 4), false), ((4, 6), true)] ); do_test!("", "" => vec![((0, 0), false)]); do_test!("aaa", "" => vec![((0, 3), false)]); do_test!("aaa", "b" => vec![((0, 3), false)]); } #[test] fn functions() { let is_b = |c| c == 'b'; do_test!("aba", is_b => vec![((0, 1), false), ((1, 2), true), ((2, 3), false)]); do_test!("aaaab", is_b => vec![((0, 4), false), ((4, 5), true)]); do_test!("bbaaa", is_b => vec![((0, 1), true), ((1, 2), true), ((2, 5), false)]); do_test!("", is_b => vec![((0, 0), false)]); do_test!("aaa", is_b => vec![((0, 3), false)]); } #[test] fn regex() { let is_whitespace = Regex::new(r"\s+").unwrap(); do_test!("a b", &is_whitespace => vec![((0, 1), false), ((1, 4), true), ((4, 5), false)]); do_test!(" a b ", &is_whitespace => vec![((0, 3), true), ((3, 4), false), ((4, 7), true), ((7, 8), false), ((8, 11), true)] ); do_test!("", &is_whitespace => vec![((0, 0), false)]); do_test!("𝔾𝕠𝕠𝕕 𝕞𝕠𝕣𝕟𝕚𝕟𝕘", &is_whitespace => vec![((0, 16), false), ((16, 17), true), ((17, 45), false)] ); do_test!("aaa", &is_whitespace => vec![((0, 3), false)]); } #[test] fn sys_regex() { let is_whitespace = SysRegex::new(r"\s+").unwrap(); do_test!("a b", &is_whitespace => vec![((0, 1), false), ((1, 4), true), ((4, 5), false)]); do_test!(" a b ", &is_whitespace => vec![((0, 3), true), ((3, 4), false), ((4, 7), true), ((7, 8), false), ((8, 11), true)] ); do_test!("", &is_whitespace => vec![((0, 0), false)]); do_test!("𝔾𝕠𝕠𝕕 𝕞𝕠𝕣𝕟𝕚𝕟𝕘", &is_whitespace => vec![((0, 16), false), ((16, 17), true), ((17, 45), false)] ); do_test!("aaa", &is_whitespace => vec![((0, 3), false)]); } }
hf_public_repos/tokenizers/tokenizers/src/tokenizer/mod.rs
//! Represents a tokenization pipeline. //! //! A [`Tokenizer`](struct.Tokenizer.html) is composed of some of the following parts. //! - [`Normalizer`](trait.Normalizer.html): Takes care of the text normalization (like unicode normalization). //! - [`PreTokenizer`](trait.PreTokenizer.html): Takes care of the pre tokenization (ie. How to split tokens and pre-process //! them. //! - [`Model`](trait.Model.html): A model encapsulates the tokenization algorithm (like BPE, Word base, character //! based, ...). //! - [`PostProcessor`](trait.PostProcessor.html): Takes care of the processing after tokenization (like truncating, padding, //! ...). use std::{ collections::HashMap, fs::{read_to_string, File}, io::prelude::*, io::BufReader, ops::{Deref, DerefMut}, path::{Path, PathBuf}, }; use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; use crate::utils::iter::ResultShunt; use crate::utils::parallelism::*; use crate::utils::progress::{ProgressBar, ProgressStyle}; mod added_vocabulary; mod encoding; pub mod normalizer; pub mod pattern; pub mod pre_tokenizer; mod serialization; // Re-export wrappers pub use crate::decoders::DecoderWrapper; pub use crate::models::ModelWrapper; pub use crate::normalizers::NormalizerWrapper; pub use crate::pre_tokenizers::PreTokenizerWrapper; pub use crate::processors::PostProcessorWrapper; // And some other types pub use crate::utils::iter::LinesWithEnding; pub use crate::utils::padding::{pad_encodings, PaddingDirection, PaddingParams, PaddingStrategy}; pub use crate::utils::truncation::{ truncate_encodings, TruncationDirection, TruncationParams, TruncationStrategy, }; pub use added_vocabulary::*; pub use encoding::*; pub use normalizer::{NormalizedString, OffsetReferential, SplitDelimiterBehavior}; pub use pre_tokenizer::*; pub type Error = Box<dyn std::error::Error + Send + Sync>; pub type Result<T> = std::result::Result<T, Error>; pub type Offsets = (usize, usize); /// Takes care of pre-processing strings. pub trait Normalizer { fn normalize(&self, normalized: &mut NormalizedString) -> Result<()>; } /// The `PreTokenizer` is in charge of doing the pre-segmentation step. It splits the given string /// in multiple substrings, keeping track of the offsets of said substrings from the /// `NormalizedString`. In some occasions, the `PreTokenizer` might need to modify the given /// `NormalizedString` to ensure we can entirely keep track of the offsets and the mapping with /// the original string. pub trait PreTokenizer { fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()>; } /// Represents a model used during Tokenization (like BPE or Word or Unigram). pub trait Model { type Trainer: Trainer + Sync; /// Tokenize the given sequence into multiple underlying `Token`. The `offsets` on the `Token` /// are expected to be relative to the given sequence. fn tokenize(&self, sequence: &str) -> Result<Vec<Token>>; /// Find the ID associated to a string token fn token_to_id(&self, token: &str) -> Option<u32>; /// Find the string token associated to an ID fn id_to_token(&self, id: u32) -> Option<String>; /// Retrieve the entire vocabulary mapping (token -> ID) fn get_vocab(&self) -> HashMap<String, u32>; /// Retrieve the size of the vocabulary fn get_vocab_size(&self) -> usize; /// Save the current `Model` in the given folder, using the given `prefix` for the various /// files that need to be saved. 
fn save(&self, folder: &Path, prefix: Option<&str>) -> Result<Vec<PathBuf>>; /// Get an instance of a Trainer capable of training this Model fn get_trainer(&self) -> <Self as Model>::Trainer; } /// A `PostProcessor` has the responsibility to post process an encoded output of the `Tokenizer`. /// It adds any special tokens that a language model would require. pub trait PostProcessor { /// Returns the number of tokens that will be added during the processing step fn added_tokens(&self, is_pair: bool) -> usize; /// Process both encodings and returns a new merged one fn process( &self, encoding: Encoding, pair_encoding: Option<Encoding>, add_special_tokens: bool, ) -> Result<Encoding> { let mut encodings = if let Some(pair_encoding) = pair_encoding { vec![encoding, pair_encoding] } else { vec![encoding] }; encodings.iter_mut().enumerate().for_each(|(i, encoding)| { encoding.set_sequence_id(i); encoding .get_overflowing_mut() .iter_mut() .for_each(|encoding| encoding.set_sequence_id(i)); encoding.set_type_ids(vec![i as u32; encoding.len()]); }); let encodings = self.process_encodings(encodings, add_special_tokens)?; Ok(Encoding::merge(encodings, false)) } /// Process any amount of encodings and returns a series of encoding (might merge them) fn process_encodings( &self, encodings: Vec<Encoding>, add_special_tokens: bool, ) -> Result<Vec<Encoding>>; } impl dyn PostProcessor { pub fn default_process( encodings: Vec<Encoding>, _add_special_tokens: bool, ) -> Result<Vec<Encoding>> { match encodings.len() { 1 => Ok(encodings), _ => { let mut final_encoding = Encoding::default(); for (i, mut encoding) in encodings.into_iter().enumerate() { encoding.set_sequence_id(i); final_encoding.merge_with(encoding, false); } Ok(vec![final_encoding]) } } } } #[derive(thiserror::Error, Debug)] pub enum ProcessorError { #[error("encodings vector length must be either 1 or 2")] InvalidEncodingsVecLength, } /// A `Decoder` changes the raw tokens into its more readable form. pub trait Decoder { fn decode(&self, tokens: Vec<String>) -> Result<String> { let results = self.decode_chain(tokens)?; Ok(results.join("")) } fn decode_chain(&self, tokens: Vec<String>) -> Result<Vec<String>>; } /// A `Trainer` has the responsibility to train a model. We feed it with lines/sentences /// and then it can train the given `Model`. pub trait Trainer { type Model: Model + Sized; /// Whether we should show progress during the training. fn should_show_progress(&self) -> bool; /// The actual training method. This will return a new trained Model as well as a list /// of `special_tokens` to be added directly to the tokenizer along with the model. fn train(&self, model: &mut Self::Model) -> Result<Vec<AddedToken>>; /// Process an iterator of sequences, calling `process` for each of them in order to /// pre-process the said sequence as relevant. 
fn feed<I, S, F>(&mut self, iterator: I, process: F) -> Result<()> where I: Iterator<Item = S> + Send, S: AsRef<str> + Send, F: Fn(&str) -> Result<Vec<String>> + Sync; } #[derive(Debug, Clone, PartialEq, Eq)] pub struct Token { pub id: u32, pub value: String, pub offsets: (usize, usize), } impl Token { pub fn new(id: u32, value: String, offsets: (usize, usize)) -> Self { Self { id, value, offsets } } } use std::borrow::Cow; #[derive(Debug, Clone)] pub enum InputSequence<'s> { Raw(Cow<'s, str>), PreTokenized(Cow<'s, [&'s str]>), PreTokenizedOwned(Cow<'s, [String]>), PreTokenizedCow(Cow<'s, [Cow<'s, str>]>), } impl<'s> From<Cow<'s, str>> for InputSequence<'s> { fn from(input: Cow<'s, str>) -> Self { Self::Raw(input) } } impl<'s> From<&'s str> for InputSequence<'s> { fn from(input: &'s str) -> Self { Self::Raw(Cow::Borrowed(input)) } } impl From<String> for InputSequence<'_> { fn from(input: String) -> Self { Self::Raw(Cow::Owned(input)) } } impl<'s> From<&'s [&'s str]> for InputSequence<'s> { fn from(input: &'s [&'s str]) -> Self { Self::PreTokenized(Cow::Borrowed(input)) } } impl<'s> From<Vec<&'s str>> for InputSequence<'s> { fn from(input: Vec<&'s str>) -> Self { Self::PreTokenized(Cow::Owned(input)) } } impl<'s> From<&'s [String]> for InputSequence<'s> { fn from(input: &'s [String]) -> Self { Self::PreTokenizedOwned(Cow::Borrowed(input)) } } impl<'s> From<Vec<String>> for InputSequence<'s> { fn from(input: Vec<String>) -> Self { Self::PreTokenizedOwned(Cow::Owned(input)) } } impl<'s> From<Vec<Cow<'s, str>>> for InputSequence<'s> { fn from(input: Vec<Cow<'s, str>>) -> Self { Self::PreTokenizedCow(Cow::Owned(input)) } } impl<'s> From<&'s [Cow<'s, str>]> for InputSequence<'s> { fn from(input: &'s [Cow<'s, str>]) -> Self { Self::PreTokenizedCow(Cow::Borrowed(input)) } } #[derive(Debug, Clone)] pub enum EncodeInput<'s> { Single(InputSequence<'s>), Dual(InputSequence<'s>, InputSequence<'s>), } impl<'s, I: Into<InputSequence<'s>>> From<I> for EncodeInput<'s> { fn from(input: I) -> Self { Self::Single(input.into()) } } impl<'s, I1, I2> From<(I1, I2)> for EncodeInput<'s> where I1: Into<InputSequence<'s>>, I2: Into<InputSequence<'s>>, { fn from(input: (I1, I2)) -> Self { Self::Dual(input.0.into(), input.1.into()) } } #[derive(thiserror::Error, Debug)] #[error("{0}")] pub struct BuilderError(String); /// Builder for Tokenizer structs. /// /// `build()` fails if the `model` is missing. pub struct TokenizerBuilder<M, N, PT, PP, D> { model: Option<M>, normalizer: Option<N>, pre_tokenizer: Option<PT>, post_processor: Option<PP>, decoder: Option<D>, added_vocabulary: AddedVocabulary, truncation: Option<TruncationParams>, padding: Option<PaddingParams>, } impl<M, N, PT, PP, D> Default for TokenizerBuilder<M, N, PT, PP, D> where M: Model, N: Normalizer, PT: PreTokenizer, PP: PostProcessor, D: Decoder, { fn default() -> Self { Self::new() } } impl<M, N, PT, PP, D> TokenizerBuilder<M, N, PT, PP, D> where M: Model, N: Normalizer, PT: PreTokenizer, PP: PostProcessor, D: Decoder, { /// Get an empty TokenizerBuilder. pub fn new() -> Self { Self { model: None, normalizer: None, pre_tokenizer: None, post_processor: None, decoder: None, added_vocabulary: AddedVocabulary::new(), truncation: None, padding: None, } } /// Convert the TokenizerBuilder to a Tokenizer. /// /// Conversion fails if the `model` is missing. 
pub fn build(self) -> Result<TokenizerImpl<M, N, PT, PP, D>> { let model = self .model .ok_or_else(|| Box::new(BuilderError("Model missing.".into())))?; Ok(TokenizerImpl { normalizer: self.normalizer, pre_tokenizer: self.pre_tokenizer, model, post_processor: self.post_processor, decoder: self.decoder, added_vocabulary: self.added_vocabulary, truncation: self.truncation, padding: self.padding, }) } /// Set the model. #[must_use] pub fn with_model(mut self, model: M) -> Self { self.model = Some(model); self } /// Set the normalizer. #[must_use] pub fn with_normalizer(mut self, normalizer: Option<N>) -> Self { self.normalizer = normalizer; self } /// Set the pre-tokenizer. #[must_use] pub fn with_pre_tokenizer(mut self, pretokenizer: Option<PT>) -> Self { self.pre_tokenizer = pretokenizer; self } /// Set the post-processor. #[must_use] pub fn with_post_processor(mut self, post_processor: Option<PP>) -> Self { self.post_processor = post_processor; self } /// Set the decoder. #[must_use] pub fn with_decoder(mut self, decoder: Option<D>) -> Self { self.decoder = decoder; self } /// Set the trunaction parameters. #[must_use] pub fn with_truncation(mut self, trunc: Option<TruncationParams>) -> Self { self.truncation = trunc; self } /// Set the padding parameters. #[must_use] pub fn with_padding(mut self, padding: Option<PaddingParams>) -> Self { self.padding = padding; self } } #[derive(Serialize, Deserialize, Debug, Clone)] pub struct Tokenizer( TokenizerImpl< ModelWrapper, NormalizerWrapper, PreTokenizerWrapper, PostProcessorWrapper, DecoderWrapper, >, ); impl Tokenizer { /// Construct a new Tokenizer based on the model. pub fn new(model: impl Into<ModelWrapper>) -> Self { Self(TokenizerImpl::new(model.into())) } /// Unwrap the TokenizerImpl. pub fn into_inner( self, ) -> TokenizerImpl< ModelWrapper, NormalizerWrapper, PreTokenizerWrapper, PostProcessorWrapper, DecoderWrapper, > { self.0 } pub fn from_file<P: AsRef<Path>>(file: P) -> Result<Self> { let content = read_to_string(file)?; let tokenizer = serde_json::from_str(&content)?; Ok(tokenizer) } pub fn from_bytes<P: AsRef<[u8]>>(bytes: P) -> Result<Self> { let tokenizer = serde_json::from_slice(bytes.as_ref())?; Ok(tokenizer) } #[cfg(feature = "http")] pub fn from_pretrained<S: AsRef<str>>( identifier: S, params: Option<crate::utils::from_pretrained::FromPretrainedParameters>, ) -> Result<Self> { let tokenizer_file = crate::utils::from_pretrained::from_pretrained(identifier, params)?; Tokenizer::from_file(tokenizer_file) } } impl std::str::FromStr for Tokenizer { type Err = Box<dyn std::error::Error + Send + Sync>; fn from_str(s: &str) -> Result<Self> { Ok(serde_json::from_str(s)?) 
} } impl<M, N, PT, PP, D> From<TokenizerImpl<M, N, PT, PP, D>> for Tokenizer where M: Into<ModelWrapper>, N: Into<NormalizerWrapper>, PT: Into<PreTokenizerWrapper>, PP: Into<PostProcessorWrapper>, D: Into<DecoderWrapper>, { fn from(t: TokenizerImpl<M, N, PT, PP, D>) -> Self { Self(TokenizerImpl { model: t.model.into(), normalizer: t.normalizer.map(Into::into), pre_tokenizer: t.pre_tokenizer.map(Into::into), post_processor: t.post_processor.map(Into::into), decoder: t.decoder.map(Into::into), added_vocabulary: t.added_vocabulary, padding: t.padding, truncation: t.truncation, }) } } impl Deref for Tokenizer { type Target = TokenizerImpl< ModelWrapper, NormalizerWrapper, PreTokenizerWrapper, PostProcessorWrapper, DecoderWrapper, >; fn deref(&self) -> &Self::Target { &self.0 } } impl DerefMut for Tokenizer { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } /// A `Tokenizer` is capable of encoding/decoding any text. #[derive(Clone, Debug)] pub struct TokenizerImpl<M, N, PT, PP, D> { // Tokenizer parts normalizer: Option<N>, pre_tokenizer: Option<PT>, model: M, post_processor: Option<PP>, decoder: Option<D>, // Added Vocabulary capabilities added_vocabulary: AddedVocabulary, // General processing parameters truncation: Option<TruncationParams>, padding: Option<PaddingParams>, } impl<M, N, PT, PP, D> TokenizerImpl<M, N, PT, PP, D> where M: Model, N: Normalizer, PT: PreTokenizer, PP: PostProcessor, D: Decoder, { /// Instantiate a new Tokenizer, with the given Model pub fn new(model: M) -> Self { Self { normalizer: None, pre_tokenizer: None, model, post_processor: None, decoder: None, added_vocabulary: AddedVocabulary::new(), truncation: None, padding: None, } } /// Set the normalizer pub fn with_normalizer(&mut self, normalizer: impl Into<N>) -> &mut Self { self.normalizer = Some(normalizer.into()); self } /// Get the normalizer pub fn get_normalizer(&self) -> Option<&N> { self.normalizer.as_ref() } /// Set the pre tokenizer pub fn with_pre_tokenizer(&mut self, pre_tokenizer: impl Into<PT>) -> &mut Self { self.pre_tokenizer = Some(pre_tokenizer.into()); self } /// Get the pre tokenizer pub fn get_pre_tokenizer(&self) -> Option<&PT> { self.pre_tokenizer.as_ref() } /// Set the post processor pub fn with_post_processor(&mut self, post_processor: impl Into<PP>) -> &mut Self { self.post_processor = Some(post_processor.into()); self } /// Get the post processor pub fn get_post_processor(&self) -> Option<&PP> { self.post_processor.as_ref() } /// Set the decoder pub fn with_decoder(&mut self, decoder: impl Into<D>) -> &mut Self { self.decoder = Some(decoder.into()); self } /// Get the decoder pub fn get_decoder(&self) -> Option<&D> { self.decoder.as_ref() } /// Set the model pub fn with_model(&mut self, model: impl Into<M>) -> &mut Self { self.model = model.into(); self } /// Get the model pub fn get_model(&self) -> &M { &self.model } /// Set the truncation parameters pub fn with_truncation(&mut self, trunc: Option<TruncationParams>) -> &mut Self { self.truncation = trunc; self } /// Get the currently set truncation parameters pub fn get_truncation(&self) -> Option<&TruncationParams> { self.truncation.as_ref() } /// Get a mutable reference to the currently set truncation parameters pub fn get_truncation_mut(&mut self) -> Option<&mut TruncationParams> { self.truncation.as_mut() } /// Set the padding parameters pub fn with_padding(&mut self, padding: Option<PaddingParams>) -> &mut Self { self.padding = padding; self } /// Get the currently set padding parameters pub fn get_padding(&self) -> 
Option<&PaddingParams> { self.padding.as_ref() } /// Get a mutable reference to the currently set padding parameters pub fn get_padding_mut(&mut self) -> Option<&mut PaddingParams> { self.padding.as_mut() } /// Get the vocabulary pub fn get_vocab(&self, with_added_tokens: bool) -> HashMap<String, u32> { let mut final_vocab = self.model.get_vocab(); if with_added_tokens { let added_vocab = self.added_vocabulary.get_vocab(); if !added_vocab.is_empty() { final_vocab.reserve(added_vocab.len()); for (token, id) in added_vocab { final_vocab.insert(token.clone(), *id); } } } final_vocab } /// Get the size of the vocabulary pub fn get_vocab_size(&self, with_added_tokens: bool) -> usize { self.model.get_vocab_size() + if with_added_tokens { self.added_vocabulary.len() } else { 0 } } /// Converts a token in the corresponding id. pub fn token_to_id(&self, token: &str) -> Option<u32> { self.added_vocabulary.token_to_id(token, &self.model) } /// Converts an id to the corresponding token. pub fn id_to_token(&self, id: u32) -> Option<String> { self.added_vocabulary.id_to_token(id, &self.model) } /// Encode a single sequence fn encode_single_sequence( &self, sequence: InputSequence, type_id: u32, offsets_type: OffsetType, ) -> Result<Encoding> { let encode = |is_pre_tokenized, subseq_idx, subseq| -> Result<Encoding> { let normalized = self .added_vocabulary .extract_and_normalize(self.normalizer.as_ref(), subseq); let pre_tokenized = self.do_pre_tokenize(normalized)?; let subseq_encoding = self.do_tokenize( pre_tokenized, type_id, if is_pre_tokenized { Some(subseq_idx as u32) } else { None }, offsets_type, )?; Ok(subseq_encoding) }; match sequence { InputSequence::PreTokenized(seq) => seq .iter() .enumerate() .map(|(i, sequence)| encode(true, i, sequence)) .collect(), InputSequence::PreTokenizedOwned(seq) => seq .iter() .enumerate() .map(|(i, sequence)| encode(true, i, sequence)) .collect(), InputSequence::PreTokenizedCow(seq) => seq .iter() .enumerate() .map(|(i, sequence)| encode(true, i, sequence)) .collect(), InputSequence::Raw(seq) => encode(false, 0, seq.as_ref()), } } /// Encode the given input. This method accepts both single sequences, as well as pair /// sequences. Also, a sequence can be a string, or already pre-tokenized input directly: /// /// ``` /// # use tokenizers::Tokenizer; /// # use tokenizers::models::bpe::BPE; /// # let mut tokenizer = Tokenizer::new(BPE::default()); /// # /// // Sequences: /// tokenizer.encode("Single sequence", false); /// tokenizer.encode(("Sequence A", "Sequence B"), false); /// /// // Pre-tokenized sequences: /// tokenizer.encode(&["Single", "sequence"][..], false); /// tokenizer.encode(( /// &["Sequence", "A"][..], /// &["Sequence", "B"][..] /// ), false); /// /// // or even both types together: /// tokenizer.encode(("A complete sequence", &["And", "a", "tokenized"][..]), false); /// ``` pub fn encode<'s, E>(&self, input: E, add_special_tokens: bool) -> Result<Encoding> where E: Into<EncodeInput<'s>>, { // Extract sequences from the EncodeInput let (sequence, pair) = match input.into() { EncodeInput::Single(s1) => (s1, None), EncodeInput::Dual(s1, s2) => (s1, Some(s2)), }; // Encode each sequence let encoding = self.encode_single_sequence(sequence, 0, OffsetType::Byte)?; let pair_encoding = pair .map(|sequence| self.encode_single_sequence(sequence, 1, OffsetType::Byte)) .transpose()?; // And finally post process self.post_process(encoding, pair_encoding, add_special_tokens) } /// Encode the given input, using offsets relative to chars instead of bytes. 
/// This method accepts both single sequences, as well as pair sequences. Also, /// a sequence can be a string, or already pre-tokenized input directly: /// /// ``` /// # use tokenizers::Tokenizer; /// # use tokenizers::models::bpe::BPE; /// # let mut tokenizer = Tokenizer::new(BPE::default()); /// # /// // Sequences: /// tokenizer.encode("Single sequence", false); /// tokenizer.encode(("Sequence A", "Sequence B"), false); /// /// // Pre-tokenized sequences: /// tokenizer.encode(&["Single", "sequence"][..], false); /// tokenizer.encode(( /// &["Sequence", "A"][..], /// &["Sequence", "B"][..] /// ), false); /// /// // or even both types together: /// tokenizer.encode(("A complete sequence", &["And", "a", "tokenized"][..]), false); /// ``` pub fn encode_char_offsets<'s, E>(&self, input: E, add_special_tokens: bool) -> Result<Encoding> where E: Into<EncodeInput<'s>>, { // Extract sequences from the EncodeInput let (sequence, pair) = match input.into() { EncodeInput::Single(s1) => (s1, None), EncodeInput::Dual(s1, s2) => (s1, Some(s2)), }; // Encode each sequence let encoding = self.encode_single_sequence(sequence, 0, OffsetType::Char)?; let pair_encoding = pair .map(|sequence| self.encode_single_sequence(sequence, 1, OffsetType::Char)) .transpose()?; // And finally post process self.post_process(encoding, pair_encoding, add_special_tokens) } /// Decode the given ids, back to a String pub fn decode(&self, ids: &[u32], skip_special_tokens: bool) -> Result<String> { let tokens = ids .iter() .filter_map(|id| { self.added_vocabulary .id_to_token(*id, &self.model) .filter(|token| { !skip_special_tokens || !self.added_vocabulary.is_special_token(token) }) }) .collect::<Vec<_>>(); if let Some(decoder) = &self.decoder { decoder.decode(tokens) } else { Ok(tokens.join(" ")) } } } impl<M, N, PT, PP, D> TokenizerImpl<M, N, PT, PP, D> where M: Model, { /// Tokenization logic, makes the bridge between the pre-tokenization phase and the real /// tokenization phase, and converting offsets back to the original referential. fn do_tokenize<P: Into<PreTokenizedString>>( &self, pretokenized: P, type_id: u32, word_idx: Option<u32>, offsets_type: OffsetType, ) -> Result<Encoding> { let mut pretokenized: PreTokenizedString = pretokenized.into(); pretokenized.tokenize(|normalized| self.model.tokenize(normalized.get()))?; pretokenized.into_encoding(word_idx, type_id, offsets_type) } } impl<M, N, PT, PP, D> TokenizerImpl<M, N, PT, PP, D> where N: Normalizer, { /// Normalization logic, go through all normalizers fn do_normalize<V: Into<NormalizedString>>(&self, normalized: V) -> Result<NormalizedString> { let mut normalized: NormalizedString = normalized.into(); if let Some(ref normalizer) = self.normalizer { normalizer.normalize(&mut normalized)?; } Ok(normalized) } } impl<M, N, PT, PP, D> TokenizerImpl<M, N, PT, PP, D> where N: Normalizer, M: Model, { /// Register the given tokens as special tokens. 
This is especially useful for removing /// these special tokens while decoding pub fn add_special_tokens(&mut self, tokens: &[AddedToken]) -> usize { self.added_vocabulary .add_special_tokens(tokens, &self.model, self.normalizer.as_ref()) } /// Add the given tokens to the added vocabulary pub fn add_tokens(&mut self, tokens: &[AddedToken]) -> usize { self.added_vocabulary .add_tokens(tokens, &self.model, self.normalizer.as_ref()) } } impl<M, N, PT, PP, D> TokenizerImpl<M, N, PT, PP, D> where PT: PreTokenizer, { /// PreTokenization logic, handling the case where there is no PreTokenizer set fn do_pre_tokenize<P: Into<PreTokenizedString>>( &self, pretokenized: P, ) -> Result<PreTokenizedString> { let mut pretokenized: PreTokenizedString = pretokenized.into(); if let Some(ref pretok) = self.pre_tokenizer { pretok.pre_tokenize(&mut pretokenized)?; } Ok(pretokenized) } } impl<M, N, PT, PP, D> TokenizerImpl<M, N, PT, PP, D> where PP: PostProcessor, { /// Post processing logic, handling the case where there is no PostProcessor set pub fn post_process( &self, encoding: Encoding, pair_encoding: Option<Encoding>, add_special_tokens: bool, ) -> Result<Encoding> { // 1. First we truncate if needed let (encoding, pair_encoding) = { if let Some(trunc) = &self.truncation { let n_added_tokens = if let Some(processor) = &self.post_processor { processor.added_tokens(pair_encoding.is_some()) } else { 0 }; if add_special_tokens && n_added_tokens > 0 { let params = TruncationParams { max_length: trunc.max_length - n_added_tokens, ..*trunc }; truncate_encodings(encoding, pair_encoding, &params)? } else { truncate_encodings(encoding, pair_encoding, trunc)? } } else { (encoding, pair_encoding) } }; // 2. Then We post process let final_encoding = if let Some(processor) = &self.post_processor { processor.process(encoding, pair_encoding, add_special_tokens)? } else { let encodings = if let Some(pair_encoding) = pair_encoding { vec![encoding, pair_encoding] } else { vec![encoding] }; let mut encodings = <dyn PostProcessor>::default_process(encodings, add_special_tokens)?; if encodings.len() != 1 { panic!("We haven't reduced the encodings like we should have"); } encodings.pop().unwrap() }; // 3. Then we pad if needed let [final_encoding] = if let Some(params) = &self.padding { let mut arr = [final_encoding]; pad_encodings(&mut arr, params)?; arr } else { [final_encoding] }; Ok(final_encoding) } } impl<M, N, PT, PP, D> TokenizerImpl<M, N, PT, PP, D> where M: Model + Send + Sync, N: Normalizer + Send + Sync, PT: PreTokenizer + Send + Sync, PP: PostProcessor + Send + Sync, D: Decoder + Send + Sync, { /// Encode all the sentences in parallel, using multiple threads pub fn encode_batch<'s, E>( &self, inputs: Vec<E>, add_special_tokens: bool, ) -> Result<Vec<Encoding>> where E: Into<EncodeInput<'s>> + Send, { let mut encodings = inputs .into_maybe_par_iter() .map(|input| self.encode(input, add_special_tokens)) .collect::<Result<Vec<Encoding>>>()?; if let Some(params) = &self.padding { // We do the padding here to make sure we handle the batch padding pad_encodings(&mut encodings, params)?; } Ok(encodings) } /// Encode all the sentences in parallel, using multiple threads. /// The offsets on each `Encoding` will be relative to chars instead of bytes. 
pub fn encode_batch_char_offsets<'s, E>( &self, inputs: Vec<E>, add_special_tokens: bool, ) -> Result<Vec<Encoding>> where E: Into<EncodeInput<'s>> + Send, { let mut encodings = inputs .into_maybe_par_iter() .map(|input| self.encode_char_offsets(input, add_special_tokens)) .collect::<Result<Vec<Encoding>>>()?; if let Some(params) = &self.padding { // We do the padding here to make sure we handle the batch padding pad_encodings(&mut encodings, params)?; } Ok(encodings) } /// Decode all sentences in parallel pub fn decode_batch( &self, sentences: &[&[u32]], skip_special_tokens: bool, ) -> Result<Vec<String>> where M: Send + Sync, { sentences .into_maybe_par_iter() .map(|sentence| self.decode(sentence, skip_special_tokens)) .collect() } /// Train our Model from files pub fn train_from_files<T>(&mut self, trainer: &mut T, files: Vec<String>) -> Result<&mut Self> where T: Trainer<Model = M> + Sync, { let mut len = 0; for file in files.iter() { len += File::open(file) .and_then(|f| f.metadata()) .map(|m| m.len())?; } let max_read = 1_000_000; ResultShunt::process( files.into_iter().flat_map(|filename| { match File::open(filename) { Ok(file) => { let file = BufReader::with_capacity(max_read, file); // We read new lines using this API instead of the Lines Iterator // on purpose. We want to keep the `\n` and potential `\r` between each lines // We use an iterator to be able to chain with par_bridge. itertools::Either::Left(file.lines_with_ending()) } Err(e) => itertools::Either::Right(std::iter::once(Err(e))), } }), |sequences| -> Result<()> { let progress = if trainer.should_show_progress() { let progress = ProgressBar::new(len); progress.set_style( ProgressStyle::default_bar() .template("[{elapsed_precise}] {msg:<40!} {wide_bar} {percent:>18!}%"), ); progress .set_message(&format!("Pre-processing files ({:.2} Mo)", len / 1_000_000)); progress.set_draw_delta(len / 100); // Redraw only every 2% Some(progress) } else { None }; trainer.feed( sequences.map(|s| { if let Some(progress) = &progress { progress.inc(s.len() as u64) } s }), |seq| { let normalized = self.do_normalize(seq.as_ref())?; let pre_tokenized = self.do_pre_tokenize(normalized)?; Ok(pre_tokenized .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, _, _)| s.to_owned()) .collect()) }, )?; if let Some(pbar) = progress { pbar.finish(); } let special_tokens = trainer.train(&mut self.model)?; self.add_special_tokens(&special_tokens); Ok(()) }, )??; Ok(self) } /// Train our Model, using the given Trainer and iterator pub fn train<T, I, S>(&mut self, trainer: &mut T, sequences: I) -> Result<&mut Self> where T: Trainer<Model = M> + Sync, I: Iterator<Item = S> + Send, S: AsRef<str> + Send, { let (lower, upper) = sequences.size_hint(); let len = upper.unwrap_or(lower) as u64; let progress = if trainer.should_show_progress() { let progress = ProgressBar::new(len); progress.set_style( ProgressStyle::default_bar() .template("[{elapsed_precise}] {msg:<40!} {wide_bar} {pos:<9!}/{len:>9!}"), ); progress.set_message("Pre-processing sequences"); if len > 0 { progress.set_draw_delta(len / 100); // Redraw only every 2% } else { // Trying to have a good default to avoid progress tracking being the bottleneck progress.set_draw_delta(1000); } Some(progress) } else { None }; trainer.feed( sequences.map(|s| { if let Some(progress) = &progress { progress.inc(1) } s }), |seq| { let normalized = self.do_normalize(seq.as_ref())?; let pre_tokenized = self.do_pre_tokenize(normalized)?; Ok(pre_tokenized 
.get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, _, _)| s.to_owned()) .collect()) }, )?; if let Some(pbar) = progress { pbar.finish(); } let special_tokens = trainer.train(&mut self.model)?; self.add_special_tokens(&special_tokens); Ok(self) } } impl<M, N, PT, PP, D> std::str::FromStr for TokenizerImpl<M, N, PT, PP, D> where M: for<'de> Deserialize<'de> + Model, N: for<'de> Deserialize<'de> + Normalizer, PT: for<'de> Deserialize<'de> + PreTokenizer, PP: for<'de> Deserialize<'de> + PostProcessor, D: for<'de> Deserialize<'de> + Decoder, { type Err = Error; fn from_str(s: &str) -> Result<Self> { Ok(serde_json::from_str(s)?) } } impl<M, N, PT, PP, D> TokenizerImpl<M, N, PT, PP, D> where M: DeserializeOwned + Model, N: DeserializeOwned + Normalizer, PT: DeserializeOwned + PreTokenizer, PP: DeserializeOwned + PostProcessor, D: DeserializeOwned + Decoder, { /// Instantiate a new Tokenizer from the given file pub fn from_file<P: AsRef<Path>>(file: P) -> Result<Self> { let content = read_to_string(file)?; let tokenizer = serde_json::from_str(&content)?; Ok(tokenizer) } } impl<M, N, PT, PP, D> TokenizerImpl<M, N, PT, PP, D> where M: DeserializeOwned + Model, N: DeserializeOwned + Normalizer, PT: DeserializeOwned + PreTokenizer, PP: DeserializeOwned + PostProcessor, D: DeserializeOwned + Decoder, { /// Instantiate a new Tokenizer from bytes pub fn from_bytes<P: AsRef<[u8]>>(bytes: P) -> Result<Self> { let tokenizer = serde_json::from_slice(bytes.as_ref())?; Ok(tokenizer) } } impl<M, N, PT, PP, D> TokenizerImpl<M, N, PT, PP, D> where M: DeserializeOwned + Model, N: DeserializeOwned + Normalizer, PT: DeserializeOwned + PreTokenizer, PP: DeserializeOwned + PostProcessor, D: DeserializeOwned + Decoder, { #[cfg(feature = "http")] /// Instantiate a new Tokenizer from a file hosted on the Hugging Face Hub. /// It expects the `identifier` of a model that includes a `tokenizer.json` file. pub fn from_pretrained<S: AsRef<str>>( identifier: S, params: Option<crate::utils::from_pretrained::FromPretrainedParameters>, ) -> Result<Self> { let tokenizer_file = crate::utils::from_pretrained::from_pretrained(identifier, params)?; TokenizerImpl::from_file(tokenizer_file) } } impl<M, N, PT, PP, D> TokenizerImpl<M, N, PT, PP, D> where M: Serialize, N: Serialize, PT: Serialize, PP: Serialize, D: Serialize, { /// Serialize the current tokenizer as a String pub fn to_string(&self, pretty: bool) -> Result<String> { Ok(if pretty { serde_json::to_string_pretty(self)? } else { serde_json::to_string(self)? }) } /// Save the current tokenizer at the given path pub fn save<P: AsRef<Path>>(&self, path: P, pretty: bool) -> Result<()> { let serialized = self.to_string(pretty)?; let mut file = File::create(path)?; file.write_all(serialized.as_bytes())?; Ok(()) } }
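// ---------------------------------------------------------------------------
// NOTE: the module below is an illustrative sketch added for documentation
// purposes; it is not part of the original file. It assumes `BPE::default()`
// converts into a `ModelWrapper` (as the doc examples above already rely on)
// and only demonstrates the shape of the high-level API defined in this
// module: wrap a `Model` in a `Tokenizer`, `encode` an input, then `decode`
// the produced ids.
#[cfg(test)]
mod api_usage_sketch {
    use super::Tokenizer;
    use crate::models::bpe::BPE;

    #[test]
    fn encode_then_decode_shapes() {
        // A default `BPE` has an empty vocabulary, so the resulting encoding
        // may well be empty; the goal is only to show the call sequence.
        let tokenizer = Tokenizer::new(BPE::default());

        // `encode` accepts anything convertible into an `EncodeInput`:
        // a single sequence, a pair of sequences, or pre-tokenized input.
        if let Ok(encoding) = tokenizer.encode("Hello world", false) {
            // One id per token is a basic invariant of `Encoding`.
            assert_eq!(encoding.get_ids().len(), encoding.get_tokens().len());

            // `decode` maps ids back to a string, optionally skipping the
            // special tokens registered in the added vocabulary.
            let _decoded = tokenizer
                .decode(encoding.get_ids(), true)
                .unwrap_or_default();
        }
    }
}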
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/tokenizer/pre_tokenizer.rs
use crate::{ normalizer::Range, Encoding, NormalizedString, OffsetReferential, Offsets, Result, Token, }; use std::collections::HashMap; /// Various possible types of offsets #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum OffsetType { Byte, Char, } /// Wrapper for a subpart of a `NormalizedString`. /// /// This Split contains the underlying `NormalizedString` as well as its offsets /// in the original string. These offsets are in the `original` referential. /// It also contains any `Token` associated to the current split #[derive(Debug, Clone, PartialEq, Eq)] pub struct Split { /// The underlying `NormalizedString`. Each SubString is represented by a `NormalizedString` /// and in the end we might be carrying a lot of SubString representing various parts of the /// original input string. normalized: NormalizedString, /// Optional Tokens associated to this Split tokens: Option<Vec<Token>>, } impl From<NormalizedString> for Split { fn from(n: NormalizedString) -> Self { Self { normalized: n, tokens: None, } } } impl From<(NormalizedString, Option<Vec<Token>>)> for Split { fn from(f: (NormalizedString, Option<Vec<Token>>)) -> Self { Self { normalized: f.0, tokens: f.1, } } } /// The `PreTokenizedString` is in charge of splitting an underlying string, /// making sure everything is fine while doing so, and providing ways to normalize /// and tokenize these splits. /// Once everything has been normalized and tokenized, the `PreTokenizedString` is able /// to build an `Encoding` with all the relevant offsets and word ids, relative to the /// original string. #[derive(Debug, Clone, PartialEq, Eq)] pub struct PreTokenizedString { original: String, splits: Vec<Split>, } impl PreTokenizedString { /// Split the `PreTokenizedString` by providing a `split_fn` in charge of splitting /// each substring (`NormalizedString`) into multiple parts. /// /// `split_fn` takes a `NormalizedString` and is in charge of returning an iterator /// over the produced `NormalizedString`. `split_fn` is free of modifying these /// `NormalizedString` as relevant, as long as it respects the constraint stated below. /// /// There are only one constraint that *MUST* be respected: /// > The produced `NormalizedString`, if combined back together, must have the /// same `original` string as the original one given to `split_fn`. This concretely /// means that for the offset tracking to work as expected, `split_fn` must produce /// "splits" of the original string. pub fn split<F, U, R>(&mut self, mut split_fn: F) -> Result<()> where F: FnMut(usize, NormalizedString) -> Result<U>, U: IntoIterator<Item = R>, R: Into<Split>, { // new_splits is at least as big as self.splits let mut new_splits = Vec::with_capacity(self.splits.len()); for (i, original_split) in self.splits.drain(..).enumerate() { if original_split.tokens.is_some() { new_splits.push(original_split); continue; } new_splits.extend( split_fn(i, original_split.normalized)? .into_iter() .filter_map(|split| { let split: Split = split.into(); if split.normalized.is_empty() { None } else { Some(split) } }), ); } self.splits = new_splits; Ok(()) } /// Normalized all the splits that do not have attached `Tokens`, using the provided /// `normalize` function. 
pub fn normalize<F>(&mut self, normalize: F) -> Result<()> where F: Fn(&mut NormalizedString) -> Result<()>, { for split in self.splits.iter_mut().filter(|s| s.tokens.is_none()) { normalize(&mut split.normalized)?; } Ok(()) } /// Tokenize all the splits that do not have attached `Tokens`, using the provided /// `tokenize` function pub fn tokenize<F>(&mut self, tokenize: F) -> Result<()> where F: Fn(&NormalizedString) -> Result<Vec<Token>>, { for split in self.splits.iter_mut().filter(|s| s.tokens.is_none()) { split.tokens = Some(tokenize(&split.normalized)?); } Ok(()) } /// Transform the current `PreTokenizedString` into an `Encoding`. /// /// If a `word_idx` is provided, any word in the generated `Encoding` /// will be set to this value. This is generally used with pre-tokenized /// input, that do not need the `PreTokenizedString` to generate word ids. /// /// This method will fail if some splits do not have associated `Token`. pub fn into_encoding( self, word_idx: Option<u32>, type_id: u32, offset_type: OffsetType, ) -> Result<Encoding> { if self.splits.is_empty() { Ok(Encoding::default()) } else if !self.splits.iter().all(|split| split.tokens.is_some()) { Err("Split has not been tokenized, call `PreTokenizedString::tokenize` first".into()) } else { let offset_converter = match offset_type { OffsetType::Char => Some(BytesToCharOffsetConverter::new(&self.original)), OffsetType::Byte => None, }; Ok(self .splits .into_iter() .enumerate() .flat_map(|(idx, split)| { let normalized = split.normalized; let offsets = normalized.offsets_original(); let offset_converter = &offset_converter; split.tokens.unwrap().into_iter().map(move |token| { let mut offsets = normalized .convert_offsets(Range::Normalized(token.offsets.0..token.offsets.1)) .map_or(token.offsets, |range| { (offsets.0 + range.start, offsets.0 + range.end) }); // Convert to char offsets if relevant if let Some(converter) = offset_converter { offsets = converter.convert(offsets).unwrap_or(offsets); } ( token.id, token.value, offsets, if word_idx.is_some() { word_idx } else { Some(idx as u32) }, type_id, ) }) }) .collect()) } } /// Returns a list of splits, each of them being a slice of the normalized /// string, the associated offsets either in original or normalized /// referential, as well as the potention tokens pub fn get_splits( &self, offset_ref: OffsetReferential, offset_type: OffsetType, ) -> Vec<(&str, Offsets, &Option<Vec<Token>>)> { let offset_converter = match offset_type { OffsetType::Char => Some(BytesToCharOffsetConverter::new(&self.original)), OffsetType::Byte => None, }; let mut offset = 0; self.splits .iter() .map(|split| { let mut offsets = match offset_ref { OffsetReferential::Original => split.normalized.offsets_original(), OffsetReferential::Normalized => { let len = split.normalized.len(); offset += len; (offset - len, offset) } }; // Convert to char offsets if relevant if let Some(ref converter) = offset_converter { offsets = converter.convert(offsets).unwrap_or(offsets); } (split.normalized.get(), offsets, &split.tokens) }) .collect() } } impl From<NormalizedString> for PreTokenizedString { fn from(s: NormalizedString) -> Self { Self { original: s.get_original().to_owned(), splits: vec![Split { normalized: s, tokens: None, }], } } } impl From<&str> for PreTokenizedString { fn from(s: &str) -> Self { let normalized: NormalizedString = s.into(); normalized.into() } } impl From<String> for PreTokenizedString { fn from(s: String) -> Self { let normalized: NormalizedString = s.into(); normalized.into() } } struct 
BytesToCharOffsetConverter { map: HashMap<usize, usize>, } impl BytesToCharOffsetConverter { pub fn new(sequence: &str) -> Self { Self { map: sequence .char_indices() .enumerate() .flat_map(|(i, (b, c))| { let mut n = 0; std::iter::repeat_with(move || { let o = (b + n, i); n += 1; o }) .take(c.len_utf8()) }) .collect(), } } pub fn convert(&self, offsets: Offsets) -> Option<Offsets> { match (self.map.get(&offsets.0), self.map.get(&offsets.1)) { (Some(start), Some(end)) => Some((*start, *end)), // If we reached the end, `end` is not in the map (Some(start), None) => { // But the one just before should be let last = self.map.get(&(offsets.1 - 1)).copied().unwrap_or(start + 1); Some((*start, last + 1)) } _ => None, } } }
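// ---------------------------------------------------------------------------
// NOTE: the module below is an illustrative sketch added for documentation
// purposes; it is not part of the original file. It uses a trivial "tokenizer"
// closure (one `Token` covering each whole split) to show the intended flow of
// `PreTokenizedString`: build it from a string, attach tokens, then turn it
// into an `Encoding` with offsets in the original referential.
#[cfg(test)]
mod pre_tokenized_string_sketch {
    use super::{OffsetType, PreTokenizedString};
    use crate::{OffsetReferential, Token};

    #[test]
    fn tokenize_whole_string_as_single_token() {
        let mut pretokenized: PreTokenizedString = "Hello there".into();

        // No real model here: every split becomes a single token whose offsets
        // cover the entire split, expressed in bytes of the normalized string.
        pretokenized
            .tokenize(|normalized| {
                let s = normalized.get();
                Ok(vec![Token::new(0, s.to_owned(), (0, s.len()))])
            })
            .unwrap();

        // The splits now carry their tokens; offsets are reported in the
        // original referential.
        {
            let splits =
                pretokenized.get_splits(OffsetReferential::Original, OffsetType::Byte);
            assert_eq!(splits.len(), 1);
            assert_eq!(splits[0].0, "Hello there");
        }

        // And the whole thing can be turned into an `Encoding`.
        let encoding = pretokenized
            .into_encoding(None, 0, OffsetType::Byte)
            .unwrap();
        assert_eq!(encoding.get_tokens(), &["Hello there".to_string()]);
        assert_eq!(encoding.get_offsets(), &[(0, 11)]);
    }
}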
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/tokenizer/serialization.rs
use std::marker::PhantomData; use serde::{ self, de::{Error, MapAccess, Visitor}, ser::SerializeStruct, Deserialize, Deserializer, Serialize, Serializer, }; use super::{added_vocabulary::AddedTokenWithId, TokenizerImpl}; use crate::{Decoder, Model, Normalizer, PostProcessor, PreTokenizer, TokenizerBuilder}; static SERIALIZATION_VERSION: &str = "1.0"; impl<M, N, PT, PP, D> Serialize for TokenizerImpl<M, N, PT, PP, D> where M: Serialize, N: Serialize, PT: Serialize, PP: Serialize, D: Serialize, { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { let mut tokenizer = serializer.serialize_struct("Tokenizer", 9)?; // Start by adding the current version tokenizer.serialize_field("version", SERIALIZATION_VERSION)?; // Params tokenizer.serialize_field("truncation", &self.truncation)?; tokenizer.serialize_field("padding", &self.padding)?; // Added tokens tokenizer.serialize_field("added_tokens", &self.added_vocabulary)?; // Then add our parts tokenizer.serialize_field("normalizer", &self.normalizer)?; tokenizer.serialize_field("pre_tokenizer", &self.pre_tokenizer)?; tokenizer.serialize_field("post_processor", &self.post_processor)?; tokenizer.serialize_field("decoder", &self.decoder)?; tokenizer.serialize_field("model", &self.model)?; tokenizer.end() } } impl<'de, M, N, PT, PP, D> Deserialize<'de> for TokenizerImpl<M, N, PT, PP, D> where M: Deserialize<'de> + Model, N: Deserialize<'de> + Normalizer, PT: Deserialize<'de> + PreTokenizer, PP: Deserialize<'de> + PostProcessor, D: Deserialize<'de> + Decoder, { fn deserialize<De>(deserializer: De) -> Result<Self, De::Error> where De: Deserializer<'de>, { deserializer.deserialize_struct( "Tokenizer", &[ "version", "truncation", "padding", "added_tokens", "normalizer", "pre_tokenizer", "post_processor", "decoder", "model", ], TokenizerVisitor( PhantomData, PhantomData, PhantomData, PhantomData, PhantomData, ), ) } } struct TokenizerVisitor<M, N, PT, PP, D>( PhantomData<M>, PhantomData<N>, PhantomData<PT>, PhantomData<PP>, PhantomData<D>, ); impl<'de, M, N, PT, PP, D> Visitor<'de> for TokenizerVisitor<M, N, PT, PP, D> where M: Deserialize<'de> + Model, N: Deserialize<'de> + Normalizer, PT: Deserialize<'de> + PreTokenizer, PP: Deserialize<'de> + PostProcessor, D: Deserialize<'de> + Decoder, { type Value = TokenizerImpl<M, N, PT, PP, D>; fn expecting(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { write!(fmt, "struct Tokenizer") } fn visit_map<V>(self, mut map: V) -> Result<Self::Value, V::Error> where V: MapAccess<'de>, { let mut builder = TokenizerBuilder::new(); let mut tokens: Vec<AddedTokenWithId> = vec![]; while let Some(key) = map.next_key::<String>()? 
{ match key.as_ref() { "version" => { let v: String = map.next_value()?; if &v != "1.0" { return Err(Error::custom(format!("Unknown tokenizer version '{}'", v))); } } "truncation" => { builder = builder.with_truncation(map.next_value()?); } "padding" => { builder = builder.with_padding(map.next_value()?); } "added_tokens" => { tokens = map.next_value()?; } "normalizer" => { builder = builder.with_normalizer(map.next_value()?); } "pre_tokenizer" => { builder = builder.with_pre_tokenizer(map.next_value()?); } "model" => { builder = builder.with_model(map.next_value()?); } "decoder" => { builder = builder.with_decoder(map.next_value()?); } "post_processor" => { builder = builder.with_post_processor(map.next_value()?); } _ => {} }; } let mut tokenizer = builder .build() .map_err(|e| V::Error::custom(e.to_string()))?; // We take care of deserializing the added_tokens (instead of `AddedVocabulary` directly // because it let us check that associated IDs are still good, and warn the user otherwise for token in &tokens { // Warn the user if the id is different than expected let received_id = tokenizer.token_to_id(&token.token.content); if received_id != Some(token.id) { warn!( "Warning: Token '{}' was expected to have ID '{}' but was given ID '{}'", token.token.content, token.id, if let Some(rid) = received_id { rid.to_string() } else { "None".to_string() } ); } } let added_tokens: Vec<_> = tokens.into_iter().map(|token| token.token).collect(); tokenizer.add_tokens(&added_tokens[..]); Ok(tokenizer) } } #[cfg(test)] mod tests { use crate::tokenizer::Tokenizer; use std::str::FromStr; #[test] fn test_deserialization_serialization_invariant() { let tok_json = r#"{ "version": "1.0", "truncation": null, "padding": null, "added_tokens": [ { "id": 0, "content": "[SPECIAL_0]", "single_word": false, "lstrip": false, "rstrip": false, "normalized": false, "special": true }, { "id": 1, "content": "[SPECIAL_1]", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "special": false }, { "id": 2, "content": "[SPECIAL_2]", "single_word": false, "lstrip": false, "rstrip": false, "normalized": false, "special": true } ], "normalizer": null, "pre_tokenizer": null, "post_processor": null, "decoder": null, "model": { "type": "WordPiece", "unk_token": "[UNK]", "continuing_subword_prefix": "", "max_input_chars_per_word": 100, "vocab": {} } }"#; let tokenizer = Tokenizer::from_str(tok_json).unwrap(); let tok_str = serde_json::to_string_pretty(&tokenizer).unwrap(); // It should be exactly the same as above assert_eq!(tok_str, tok_json); } }
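// ---------------------------------------------------------------------------
// NOTE: the module below is an illustrative sketch added for documentation
// purposes; it is not part of the original file. It assumes `BPE::default()`
// converts into a `ModelWrapper` and only checks that the `Serialize` impl
// above writes the `version` field along with the other tokenizer parts.
#[cfg(test)]
mod serialization_sketch {
    use crate::models::bpe::BPE;
    use crate::tokenizer::Tokenizer;

    #[test]
    fn fresh_tokenizer_serializes_with_version_field() {
        let tokenizer = Tokenizer::new(BPE::default());
        let json = serde_json::to_string(&tokenizer).unwrap();

        // The current serialization version is always written out first.
        assert!(json.contains("\"version\":\"1.0\""));
        // The other parts are present as fields, even when unset.
        assert!(json.contains("\"added_tokens\""));
        assert!(json.contains("\"model\""));
    }
}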
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/tokenizer/encoding.rs
use crate::parallelism::*; use crate::tokenizer::{Offsets, Token}; use crate::utils::padding::PaddingDirection; use crate::utils::truncation::TruncationDirection; use serde::{Deserialize, Serialize}; use std::collections::HashMap; use std::ops::Range; /// Represents the output of a `Tokenizer`. #[derive(Default, PartialEq, Debug, Clone, Serialize, Deserialize)] pub struct Encoding { /// IDs produced by the `Tokenizer` ids: Vec<u32>, /// Type of the IDs type_ids: Vec<u32>, /// Tokens associated to each ID tokens: Vec<String>, /// Indice of the word associated to each token/ID words: Vec<Option<u32>>, /// Offsets of the token/ID from the NormalizedString offsets: Vec<Offsets>, /// Mask identifying special tokens special_tokens_mask: Vec<u32>, /// Mask identifying padding tokens for the attention mechanism attention_mask: Vec<u32>, /// A list of overflowing Encoding generated when we got truncated overflowing: Vec<Encoding>, /// Ranges of tokens covered by each sequence. If this is empty we consider /// there is only one sequence in this Encoding, and that it covers the entire range. sequence_ranges: HashMap<usize, Range<usize>>, } impl Encoding { #[allow(clippy::too_many_arguments)] pub fn new( ids: Vec<u32>, type_ids: Vec<u32>, tokens: Vec<String>, words: Vec<Option<u32>>, offsets: Vec<Offsets>, special_tokens_mask: Vec<u32>, attention_mask: Vec<u32>, overflowing: Vec<Self>, sequence_ranges: HashMap<usize, Range<usize>>, ) -> Self { Self { ids, type_ids, tokens, words, offsets, special_tokens_mask, attention_mask, overflowing, sequence_ranges, } } pub fn with_capacity(len: usize) -> Self { Self { ids: Vec::with_capacity(len), type_ids: Vec::with_capacity(len), tokens: Vec::with_capacity(len), words: Vec::with_capacity(len), offsets: Vec::with_capacity(len), special_tokens_mask: Vec::with_capacity(len), attention_mask: Vec::with_capacity(len), overflowing: vec![], sequence_ranges: HashMap::new(), } } pub fn from_tokens(tokens: Vec<Token>, type_id: u32) -> Self { let length = tokens.len(); let (ids, tokens, offsets) = tokens.into_iter().fold( ( Vec::with_capacity(length), Vec::with_capacity(length), Vec::with_capacity(length), ), |(mut ids, mut tokens, mut offsets), t| { ids.push(t.id); tokens.push(t.value); offsets.push(t.offsets); (ids, tokens, offsets) }, ); Self { ids, tokens, offsets, words: vec![None; length], type_ids: vec![type_id; length], attention_mask: vec![1; length], special_tokens_mask: vec![0; length], overflowing: vec![], sequence_ranges: HashMap::new(), } } /// Whether this Encoding is empty pub fn is_empty(&self) -> bool { self.ids.is_empty() } /// Return the total length of this Encoding pub fn len(&self) -> usize { self.ids.len() } /// Return the number of sequences combined in this Encoding pub fn n_sequences(&self) -> usize { if self.sequence_ranges.is_empty() { 1 } else { self.sequence_ranges.len() } } /// Set the given sequence id for the whole range of tokens contained in this Encoding pub fn set_sequence_id(&mut self, sequence_id: usize) { self.sequence_ranges.insert(sequence_id, 0..self.len()); } pub fn get_tokens(&self) -> &[String] { &self.tokens[..] 
} pub fn get_word_ids(&self) -> &[Option<u32>] { &self.words } pub fn get_word_ids_mut(&mut self) -> &mut [Option<u32>] { &mut self.words } pub fn get_sequence_ids(&self) -> Vec<Option<usize>> { let mut sequences = vec![None; self.len()]; for seq_id in 0..self.n_sequences() { let range = self.sequence_range(seq_id); let seq_len = range.len(); sequences.splice(range, std::iter::repeat(Some(seq_id)).take(seq_len)); } sequences } pub fn get_ids(&self) -> &[u32] { &self.ids } pub fn get_type_ids(&self) -> &[u32] { &self.type_ids } pub fn set_type_ids(&mut self, type_ids: Vec<u32>) { self.type_ids = type_ids; } pub fn get_offsets(&self) -> &[Offsets] { &self.offsets } pub fn get_offsets_mut(&mut self) -> &mut [Offsets] { &mut self.offsets } pub fn get_special_tokens_mask(&self) -> &[u32] { &self.special_tokens_mask } pub fn get_attention_mask(&self) -> &[u32] { &self.attention_mask } pub fn get_overflowing(&self) -> &Vec<Encoding> { &self.overflowing } pub fn set_overflowing(&mut self, overflowing: Vec<Encoding>) { self.overflowing = overflowing; } pub fn get_overflowing_mut(&mut self) -> &mut Vec<Encoding> { &mut self.overflowing } pub fn take_overflowing(&mut self) -> Vec<Encoding> { std::mem::take(&mut self.overflowing) } pub(crate) fn process_tokens_with_offsets_mut<F>(&mut self, func: F) where F: FnMut((usize, (&String, &mut Offsets))), { self.tokens .iter() .zip(self.offsets.iter_mut()) .enumerate() .for_each(func) } /// Returns the range to target to retrieve something (word_id, offsets, ..) related to the /// given sequence id fn sequence_range(&self, sequence_id: usize) -> Range<usize> { self.sequence_ranges .get(&sequence_id) .cloned() .unwrap_or(0..self.len()) } /// Returns the index of the sequence containing the given token pub fn token_to_sequence(&self, token: usize) -> Option<usize> { if token > self.len() { None } else if self.sequence_ranges.is_empty() { Some(0) } else { self.sequence_ranges.iter().find_map(|(seq_id, range)| { if range.contains(&token) { Some(*seq_id) } else { None } }) } } /// Get the encoded tokens corresponding to the word at the given index in the input sequence, /// with the form (start_token, end_token + 1) pub fn word_to_tokens(&self, word: u32, sequence_id: usize) -> Option<(usize, usize)> { let (mut start, mut end) = (None, None); let sequence_range = self.sequence_range(sequence_id); self.words .get(sequence_range.clone())? .iter() .enumerate() .take_while(|(_, w)| **w <= Some(word)) .filter(|(_, w)| **w == Some(word)) .for_each(|(i, _)| { if start.is_none() || Some(i) < start { start = Some(i); } if end.is_none() || Some(i) >= end { end = Some(i + 1); } }); if let (Some(start), Some(end)) = (start, end) { Some((sequence_range.start + start, sequence_range.start + end)) } else { None } } /// Get the offsets of the word at the given index in the input sequence. pub fn word_to_chars(&self, word: u32, sequence_id: usize) -> Option<Offsets> { self.word_to_tokens(word, sequence_id) .and_then(|(start, end)| { if end == 0 { None } else { Some((self.offsets[start].0, self.offsets[end - 1].1)) } }) } /// Get the offsets of the token at the given index. pub fn token_to_chars(&self, token: usize) -> Option<(usize, Offsets)> { Some(( self.token_to_sequence(token)?, self.offsets.get(token).copied()?, )) } /// Get the word that contains the token at the given index. 
pub fn token_to_word(&self, token: usize) -> Option<(usize, u32)> { Some(( self.token_to_sequence(token)?, self.words.get(token).copied().flatten()?, )) } /// Get the token that contains the given char. pub fn char_to_token(&self, pos: usize, sequence_id: usize) -> Option<usize> { let sequence_range = self.sequence_range(sequence_id); self.offsets .get(sequence_range.clone())? .iter() .position(|(start, end)| pos >= *start && pos < *end) .map(|pos| sequence_range.start + pos) } /// Get the word that contains the given char. pub fn char_to_word(&self, pos: usize, sequence_id: usize) -> Option<u32> { Some( self.char_to_token(pos, sequence_id) .and_then(|token| self.token_to_word(token))? .1, ) } /// Truncate the current `Encoding`. /// /// Panics if `stride >= max_len` pub fn truncate(&mut self, max_len: usize, stride: usize, direction: TruncationDirection) { let encoding_len = self.ids.len(); if max_len >= encoding_len { return; } if max_len == 0 { let o = std::mem::replace(self, Encoding::with_capacity(0)); self.overflowing.push(o); return; } assert!(stride < max_len, "`stride` must be strictly less than `max_len={}` (note that `max_len` may be shorter than the max length of the original model, as it subtracts the number of special characters", max_len); // When truncating, we lose the `sequence_ranges` information. self.sequence_ranges.clear(); let offset = max_len - stride; let mut end = false; let parts_ranges: Vec<(usize, usize)> = match direction { TruncationDirection::Right => (0..encoding_len) .step_by(offset) .filter_map(|start| { if !end { let stop = std::cmp::min(start + max_len, encoding_len); end = stop == encoding_len; Some((start, stop)) } else { None } }) .collect(), TruncationDirection::Left => (0..encoding_len) .rev() .step_by(offset) .filter_map(|stop| { let stop = stop + 1; let start = if stop < max_len { 0 } else { stop - max_len }; if start < stop && !end { end = start == 0; Some((start, stop)) } else { None } }) .collect(), }; let mut i = 0; let (start, stop) = parts_ranges[i]; let mut new_encoding = Encoding { ids: self.ids[start..stop].to_vec(), type_ids: self.type_ids[start..stop].to_vec(), tokens: self.tokens[start..stop].to_vec(), words: self.words[start..stop].to_vec(), offsets: self.offsets[start..stop].to_vec(), special_tokens_mask: self.special_tokens_mask[start..stop].to_vec(), attention_mask: self.attention_mask[start..stop].to_vec(), overflowing: vec![], sequence_ranges: HashMap::new(), }; loop { if i == parts_ranges.len() - 1 { break; } i += 1; let (start, stop) = parts_ranges[i]; new_encoding.overflowing.push(Encoding { ids: self.ids[start..stop].to_vec(), type_ids: self.type_ids[start..stop].to_vec(), tokens: self.tokens[start..stop].to_vec(), words: self.words[start..stop].to_vec(), offsets: self.offsets[start..stop].to_vec(), special_tokens_mask: self.special_tokens_mask[start..stop].to_vec(), attention_mask: self.attention_mask[start..stop].to_vec(), overflowing: vec![], sequence_ranges: HashMap::new(), }); } *self = new_encoding; } /// Merge all Encodings together pub fn merge<I: IntoIterator<Item = Encoding>>(encodings: I, growing_offsets: bool) -> Self { let mut encoding = Encoding::default(); // TODO this is suboptimal as we're doing this iteratively instead of preallocating // all the encodings sizes all at once and only copying into this preallocated vector // https://github.com/huggingface/tokenizers/pull/1049 // In order to fix, we just need to preallocate all vectors, then copy everything // into it (and deal with overlowings correctly) for 
sub in encodings { encoding.merge_with(sub, growing_offsets); } encoding } /// Merge ourself with the given `Encoding`. Happens in place. pub fn merge_with(&mut self, pair: Encoding, growing_offsets: bool) { // Handle merging the overflowing parts too: Combine them all // In most of the cases, we expect `pair.overflowing.len() == 0` let mut overflowings = vec![]; // 1. All our overflowings with all the others for self_o in &self.overflowing { // 1. The pair itself let mut n_encoding = self_o.clone(); n_encoding.merge_with(pair.clone(), growing_offsets); overflowings.push(n_encoding); // 2. Its overflowings (this should rarely happen...) for other_o in &pair.overflowing { let mut n_encoding = self_o.clone(); n_encoding.merge_with(other_o.clone(), growing_offsets); overflowings.push(n_encoding); } } // 2. Ourself with all the other overflowings (this should rarely happen too...) for other_o in &pair.overflowing { let mut n_encoding = self.clone(); n_encoding.merge_with(other_o.clone(), growing_offsets); overflowings.push(n_encoding); } // Finish by merging ourself with the other encoding let original_self_len = self.len(); // Must be before any modification to self.ids self.sequence_ranges .extend(pair.sequence_ranges.into_iter().map(|(seq_id, range)| { ( seq_id, original_self_len + range.start..original_self_len + range.end, ) })); self.ids.extend(pair.ids); self.type_ids.extend(pair.type_ids); self.tokens.extend(pair.tokens); self.words.extend(pair.words); let starting_offset = if growing_offsets { self.offsets.last().map_or(0, |o| o.1) } else { 0 }; self.offsets.extend( pair.offsets .into_iter() .map(|(start, end)| (start + starting_offset, end + starting_offset)) .collect::<Vec<_>>(), ); self.special_tokens_mask.extend(pair.special_tokens_mask); self.attention_mask.extend(pair.attention_mask); self.overflowing = overflowings; } pub fn pad( &mut self, target_length: usize, pad_id: u32, pad_type_id: u32, pad_token: &str, direction: PaddingDirection, ) { // Dispatch call to all the overflowings first self.overflowing.maybe_par_iter_mut().for_each(|encoding| { encoding.pad(target_length, pad_id, pad_type_id, pad_token, direction) }); // Then check if we should pad ourself if self.ids.len() >= target_length { // We just do nothing if the wanted padding length is smaller than us return; } let pad_length = target_length - self.ids.len(); match direction { PaddingDirection::Left => { self.ids = (0..pad_length) .map(|_| pad_id) .chain(self.ids.drain(..)) .collect(); self.type_ids = (0..pad_length) .map(|_| pad_type_id) .chain(self.type_ids.drain(..)) .collect(); self.tokens = (0..pad_length) .map(|_| pad_token.to_owned()) .chain(self.tokens.drain(..)) .collect(); self.words = (0..pad_length) .map(|_| None) .chain(self.words.drain(..)) .collect(); self.attention_mask = (0..pad_length) .map(|_| 0) .chain(self.attention_mask.drain(..)) .collect(); self.special_tokens_mask = (0..pad_length) .map(|_| 1) .chain(self.special_tokens_mask.drain(..)) .collect(); self.offsets = (0..pad_length) .map(|_| (0, 0)) .chain(self.offsets.drain(..)) .collect(); self.sequence_ranges .iter_mut() .for_each(|(_seq_id, range)| { *range = (range.start + pad_length)..(range.end + pad_length) }); } PaddingDirection::Right => { self.ids.extend((0..pad_length).map(|_| pad_id)); self.type_ids.extend((0..pad_length).map(|_| pad_type_id)); self.tokens .extend((0..pad_length).map(|_| pad_token.to_owned())); self.words.extend((0..pad_length).map(|_| None)); self.attention_mask.extend((0..pad_length).map(|_| 0)); 
self.special_tokens_mask.extend((0..pad_length).map(|_| 1)); self.offsets.extend((0..pad_length).map(|_| (0, 0))); } } } } impl std::iter::FromIterator<Encoding> for Encoding { fn from_iter<I: IntoIterator<Item = Encoding>>(iter: I) -> Self { Self::merge(iter, false) } } impl std::iter::FromIterator<(u32, String, (usize, usize), Option<u32>, u32)> for Encoding { fn from_iter<I: IntoIterator<Item = (u32, String, (usize, usize), Option<u32>, u32)>>( iter: I, ) -> Self { let items = iter.into_iter(); let (lower, upper) = items.size_hint(); let length = upper.unwrap_or(lower); let mut encoding = Self::with_capacity(length); for (id, token, offsets, word, type_id) in items { encoding.ids.push(id); encoding.tokens.push(token); encoding.offsets.push(offsets); encoding.type_ids.push(type_id); encoding.words.push(word); encoding.special_tokens_mask.push(0); encoding.attention_mask.push(1); } encoding } } #[cfg(test)] mod tests { use super::*; use std::iter::FromIterator; #[test] fn merge_encodings() { let mut a = Encoding { ids: vec![1], type_ids: vec![0], tokens: vec![String::from("Hello ")], words: vec![Some(0)], offsets: vec![(0, 6)], special_tokens_mask: vec![0], attention_mask: vec![1], ..Default::default() }; let b = Encoding { ids: vec![2], type_ids: vec![1], tokens: vec![String::from("World!")], words: vec![Some(0)], offsets: vec![(0, 6)], special_tokens_mask: vec![0], attention_mask: vec![1], ..Default::default() }; a.merge_with(b, true); assert_eq!( a, Encoding { ids: vec![1, 2], type_ids: vec![0, 1], tokens: vec![String::from("Hello "), String::from("World!")], words: vec![Some(0), Some(0)], offsets: vec![(0, 6), (6, 12)], special_tokens_mask: vec![0, 0], attention_mask: vec![1, 1], ..Default::default() } ); } #[test] fn truncate() { let mut a = Encoding { ids: vec![1, 2, 3], type_ids: vec![0, 0, 0], tokens: vec![ String::from("Hello"), String::from("World"), String::from("!"), ], words: vec![Some(0), Some(1), Some(2)], offsets: vec![(0, 5), (6, 11), (11, 12)], special_tokens_mask: vec![0, 0, 0], attention_mask: vec![1, 1, 1], ..Default::default() }; a.truncate(2, 0, TruncationDirection::Right); assert_eq!( a, Encoding { ids: vec![1, 2], type_ids: vec![0, 0], tokens: vec![String::from("Hello"), String::from("World")], words: vec![Some(0), Some(1)], offsets: vec![(0, 5), (6, 11)], special_tokens_mask: vec![0, 0], attention_mask: vec![1, 1], overflowing: vec![Encoding { ids: vec![3], type_ids: vec![0], tokens: vec![String::from("!")], words: vec![Some(2)], offsets: vec![(11, 12)], special_tokens_mask: vec![0], attention_mask: vec![1], ..Default::default() }], ..Default::default() } ); } #[test] fn truncate_to_empty() { let mut a = Encoding { ids: vec![1, 2, 3], type_ids: vec![0, 0, 0], tokens: vec![ String::from("Hello"), String::from("World"), String::from("!"), ], words: vec![Some(0), Some(1), Some(2)], offsets: vec![(0, 5), (6, 11), (11, 12)], special_tokens_mask: vec![0, 0, 0], attention_mask: vec![1, 1, 1], ..Default::default() }; a.truncate(0, 0, TruncationDirection::Right); assert_eq!( a, Encoding { overflowing: vec![Encoding { ids: vec![1, 2, 3], type_ids: vec![0, 0, 0], tokens: vec![ String::from("Hello"), String::from("World"), String::from("!"), ], words: vec![Some(0), Some(1), Some(2)], offsets: vec![(0, 5), (6, 11), (11, 12)], special_tokens_mask: vec![0, 0, 0], attention_mask: vec![1, 1, 1], overflowing: vec![], ..Default::default() }], ..Default::default() } ); } #[test] fn truncate_overflow_with_stride() { let mut enc = Encoding { ids: vec![1, 2, 3, 4, 5], type_ids: vec![0, 
0, 0, 0, 0], tokens: vec![ String::from("42"), String::from("is"), String::from("the"), String::from("answer"), String::from("!"), ], words: vec![Some(0), Some(1), Some(2), Some(3), Some(4)], offsets: vec![(0, 2), (2, 4), (4, 7), (7, 13), (13, 14)], special_tokens_mask: vec![0, 0, 0, 0, 0], attention_mask: vec![1, 1, 1, 1, 1], overflowing: vec![], ..Default::default() }; enc.truncate(4, 2, TruncationDirection::Right); assert_eq!( enc, Encoding { ids: vec![1, 2, 3, 4], type_ids: vec![0, 0, 0, 0], tokens: vec![ String::from("42"), String::from("is"), String::from("the"), String::from("answer"), ], words: vec![Some(0), Some(1), Some(2), Some(3)], offsets: vec![(0, 2), (2, 4), (4, 7), (7, 13)], special_tokens_mask: vec![0, 0, 0, 0], attention_mask: vec![1, 1, 1, 1], overflowing: vec![Encoding { ids: vec![3, 4, 5], type_ids: vec![0, 0, 0], tokens: vec![ String::from("the"), String::from("answer"), String::from("!"), ], words: vec![Some(2), Some(3), Some(4)], offsets: vec![(4, 7), (7, 13), (13, 14)], special_tokens_mask: vec![0, 0, 0], attention_mask: vec![1, 1, 1], overflowing: vec![], ..Default::default() }], ..Default::default() } ); } #[test] fn truncate_left() { let mut a = Encoding { ids: vec![1, 2, 3], type_ids: vec![0, 0, 0], tokens: vec![ String::from("Hello"), String::from("World"), String::from("!"), ], words: vec![Some(0), Some(1), Some(2)], offsets: vec![(0, 5), (6, 11), (11, 12)], special_tokens_mask: vec![0, 0, 0], attention_mask: vec![1, 1, 1], ..Default::default() }; a.truncate(2, 0, TruncationDirection::Left); assert_eq!( a, Encoding { ids: vec![2, 3], type_ids: vec![0, 0], tokens: vec![String::from("World"), String::from("!")], words: vec![Some(1), Some(2)], offsets: vec![(6, 11), (11, 12)], special_tokens_mask: vec![0, 0], attention_mask: vec![1, 1], overflowing: vec![Encoding { ids: vec![1], type_ids: vec![0], tokens: vec![String::from("Hello")], words: vec![Some(0)], offsets: vec![(0, 5)], special_tokens_mask: vec![0], attention_mask: vec![1], ..Default::default() }], ..Default::default() } ); } #[test] fn mappings() { let encoding = Encoding { ids: vec![0; 11], // Needed for Encoding::len tokens: vec![ // First sequence: "He".into(), "llo".into(), "won".into(), "der".into(), "ful".into(), "friend".into(), "!".into(), // Second sequence: "How".into(), "are".into(), "you".into(), "?".into(), ], offsets: vec![ // First sequence: (0, 2), (2, 5), (7, 10), (10, 13), (13, 16), (17, 23), (23, 24), // Second sequence: (0, 3), (4, 7), (8, 11), (11, 12), ], words: vec![ // First sequence: Some(0), Some(0), Some(1), Some(1), Some(1), Some(2), Some(3), // Second sequence: Some(0), Some(1), Some(2), Some(3), ], sequence_ranges: HashMap::from_iter(vec![(0, 0..7), (1, 7..11)]), ..Default::default() }; assert_eq!(encoding.word_to_tokens(0, 0), Some((0, 2))); assert_eq!(encoding.word_to_tokens(1, 0), Some((2, 5))); assert_eq!(encoding.word_to_tokens(2, 0), Some((5, 6))); assert_eq!(encoding.word_to_tokens(3, 0), Some((6, 7))); assert_eq!(encoding.word_to_tokens(0, 1), Some((7, 8))); assert_eq!(encoding.word_to_tokens(1, 1), Some((8, 9))); assert_eq!(encoding.word_to_tokens(2, 1), Some((9, 10))); assert_eq!(encoding.word_to_tokens(3, 1), Some((10, 11))); assert_eq!(encoding.word_to_chars(0, 0), Some((0, 5))); assert_eq!(encoding.word_to_chars(1, 0), Some((7, 16))); assert_eq!(encoding.word_to_chars(0, 1), Some((0, 3))); assert_eq!(encoding.word_to_chars(1, 1), Some((4, 7))); assert_eq!(encoding.token_to_chars(0), Some((0, (0, 2)))); assert_eq!(encoding.token_to_chars(1), Some((0, (2, 5)))); 
assert_eq!(encoding.token_to_chars(7), Some((1, (0, 3)))); assert_eq!(encoding.token_to_chars(9), Some((1, (8, 11)))); assert_eq!(encoding.token_to_word(1), Some((0, 0))); assert_eq!(encoding.token_to_word(2), Some((0, 1))); assert_eq!(encoding.token_to_word(7), Some((1, 0))); assert_eq!(encoding.token_to_word(9), Some((1, 2))); assert_eq!(encoding.token_to_word(11), None); assert_eq!(encoding.char_to_token(3, 0), Some(1)); assert_eq!(encoding.char_to_token(8, 0), Some(2)); assert_eq!(encoding.char_to_token(16, 0), None); assert_eq!(encoding.char_to_token(23, 0), Some(6)); assert_eq!(encoding.char_to_token(2, 1), Some(7)); assert_eq!(encoding.char_to_token(9, 1), Some(9)); assert_eq!(encoding.char_to_word(3, 0), Some(0)); assert_eq!(encoding.char_to_word(8, 0), Some(1)); assert_eq!(encoding.char_to_word(16, 0), None); assert_eq!(encoding.char_to_word(23, 0), Some(3)); assert_eq!(encoding.char_to_word(2, 1), Some(0)); assert_eq!(encoding.char_to_word(9, 1), Some(2)); } #[test] fn padding() { let mut a = Encoding { ids: vec![1], type_ids: vec![0], tokens: vec![String::from("Hello ")], words: vec![Some(0)], offsets: vec![(0, 6)], special_tokens_mask: vec![0], attention_mask: vec![1], sequence_ranges: HashMap::from([(0, 0..1)]), ..Default::default() }; let target_length = 2; let pad_id = 99; let pad_type_id = 0; let pad_token = "[PAD]"; a.pad( target_length, pad_id, pad_type_id, pad_token, PaddingDirection::Left, ); assert_eq!(a.sequence_ranges, HashMap::from([(0, 1..2)])); } }
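// ---------------------------------------------------------------------------
// NOTE: the module below is an illustrative sketch added for documentation
// purposes; it is not part of the original file. It complements the `padding`
// test above by showing the right-padding behavior of `Encoding::pad`: pad
// ids/tokens are appended and the attention mask is extended with zeros.
#[cfg(test)]
mod padding_sketch {
    use super::*;

    #[test]
    fn pad_right_appends_pad_tokens() {
        let mut enc = Encoding::from_tokens(vec![Token::new(1, "Hello".into(), (0, 5))], 0);

        enc.pad(3, 99, 0, "[PAD]", PaddingDirection::Right);

        assert_eq!(enc.get_ids(), &[1, 99, 99]);
        assert_eq!(
            enc.get_tokens(),
            &["Hello".to_string(), "[PAD]".to_string(), "[PAD]".to_string()]
        );
        // Padding positions are masked out for attention.
        assert_eq!(enc.get_attention_mask(), &[1, 0, 0]);
    }
}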
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/tokenizer/normalizer.rs
use crate::pattern::Pattern; use crate::{Offsets, Result}; use std::ops::{Bound, RangeBounds}; use unicode_normalization_alignments::UnicodeNormalization; use serde::{Deserialize, Serialize}; /// Add or Substract a signed isize on a usize. Makes sure of avoiding /// any substraction overflow, flooring at 0. macro_rules! apply_signed { ($origin: expr, $signed: expr) => { if $signed.is_positive() { $origin += $signed as usize; } else { let (result, overflow) = $origin.overflowing_sub(-($signed) as usize); $origin = if overflow { 0 } else { result }; } }; } /// The possible offsets referential #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum OffsetReferential { Original, Normalized, } /// Represents a Range usable by the NormalizedString to index its content. /// A Range can use indices relative to either the `Original` or the `Normalized` string #[derive(Debug, Clone, PartialEq, Eq)] pub enum Range<T: RangeBounds<usize> + Clone> { Original(T), Normalized(T), } #[allow(clippy::len_without_is_empty)] impl<T> Range<T> where T: RangeBounds<usize> + Clone, { /// Unwrap the underlying range pub fn unwrap(self) -> T { match self { Self::Original(r) => r, Self::Normalized(r) => r, } } /// Return the length of the current Range if not Unbounded pub fn len(&self) -> Option<usize> { let range = self.clone().unwrap(); let end = match range.end_bound() { Bound::Unbounded => None, Bound::Included(i) => Some(*i + 1), Bound::Excluded(i) => Some(*i), }?; match range.start_bound() { Bound::Unbounded => Some(end), Bound::Included(i) => Some(end - (*i + 1)), Bound::Excluded(i) => Some(end - *i), } } /// Converts the current Range to a `std::ops::Range<usize>`. This requires the `max_len` /// of the represented string (in chars, not bytes) in order to cover the case where the /// original provided range was unbounded pub fn into_full_range(self, max_len: usize) -> std::ops::Range<usize> { let range = self.unwrap(); let start = match range.start_bound() { Bound::Unbounded => 0, Bound::Included(i) => *i, Bound::Excluded(i) => *i + 1, }; let end = match range.end_bound() { Bound::Unbounded => max_len, Bound::Included(i) => *i + 1, Bound::Excluded(i) => *i, }; start..end } } /// Defines the expected behavior for the delimiter of a Split Pattern /// When splitting on `'-'` for example, with input `the-final--countdown`: /// - Removed => `[ "the", "final", "countdown" ]` /// - Isolated => `[ "the", "-", "final", "-", "-", "countdown" ]` /// - MergedWithPrevious => `[ "the-", "final-", "-", "countdown" ]` /// - MergedWithNext => `[ "the", "-final", "-", "-countdown" ]` /// - Contiguous => `[ "the", "-", "final", "--", "countdown" ]` #[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize, Eq)] pub enum SplitDelimiterBehavior { Removed, Isolated, MergedWithPrevious, MergedWithNext, Contiguous, } /// A `NormalizedString` takes care of processing an "original" string to modify /// it and obtain a "normalized" string. It keeps both version of the string, /// alignments information between both and provides an interface to retrieve /// ranges of each string, using offsets from any of them. /// /// It is possible to retrieve a part of the original string, by indexing it with /// offsets from the normalized one, and the other way around too. It is also /// possible to convert offsets from one referential to the other one easily. 
#[derive(Default, Debug, Clone, PartialEq, Eq)] pub struct NormalizedString { /// The original version of the string, before any modification original: String, /// The normalized version of the string, after all modifications normalized: String, /// Mapping from normalized string to original one: (start, end) for each /// byte of the normalized string alignments: Vec<(usize, usize)>, /// If this NormalizedString is a slice of a bigger one, we keep the track /// of the missing part, so that we can still give offsets from this original /// string. original_shift: usize, } impl NormalizedString { #[cfg(test)] pub(crate) fn new( original: String, normalized: String, alignments: Vec<(usize, usize)>, original_shift: usize, ) -> Self { Self { original, normalized, alignments, original_shift, } } /// Return the normalized string pub fn get(&self) -> &str { &self.normalized } /// Return the original string pub fn get_original(&self) -> &str { &self.original } /// Return the original offsets pub fn offsets_original(&self) -> Offsets { ( self.original_shift, self.original_shift + self.len_original(), ) } /// Convert the given offsets range from one referential to the other one: /// `Original => Normalized` or `Normalized => Original` /// /// Returns `None` when targeting something that is outside range pub fn convert_offsets<T>(&self, range: Range<T>) -> Option<std::ops::Range<usize>> where T: RangeBounds<usize> + Clone, { let len_original = self.len_original(); let len_normalized = self.len(); let (target, original) = match range { Range::Original(_) => (range.into_full_range(len_original), true), Range::Normalized(_) => (range.into_full_range(len_normalized), false), }; // If we target an empty range, let's return the same if target.start == target.end { return Some(target); } // If the target goes reverse, return None if target.start > target.end { return None; } // If we target 0..0 on an empty string, we want to expand to the entire equivalent if original && self.original.is_empty() && target == (0..0) { return Some(0..len_normalized); } if !original && self.normalized.is_empty() && target == (0..0) { return Some(0..len_original); } if original { let (mut start, mut end) = (None, None); self.alignments .iter() .enumerate() .take_while(|(_, alignment)| target.end >= alignment.1) .for_each(|(i, alignment)| { if start.is_none() && target.start <= alignment.0 { // For now, don't update if width == 0 if alignment.0 != alignment.1 { start = Some(i); } } if target.end >= alignment.1 { end = Some(i + 1); } }); match (start, end) { // Targetting inexistant beginning (Some(s), None) => Some(s..s), // Targetting inexistant end (None, Some(e)) => Some(e..e), // Found the range (Some(s), Some(e)) => Some(s..e), _ => None, } } else { self.alignments.get(target).and_then(expand_alignments) } } /// Return a range of the normalized string pub fn get_range<T>(&self, range: Range<T>) -> Option<&str> where T: RangeBounds<usize> + Clone, { match range { Range::Original(_) => self.normalized.get(self.convert_offsets(range)?), Range::Normalized(_) => self.normalized.get(range.into_full_range(self.len())), } } /// Return a range of the original string pub fn get_range_original<T>(&self, range: Range<T>) -> Option<&str> where T: RangeBounds<usize> + Clone, { match range { Range::Original(_) => self .original .get(range.into_full_range(self.len_original())), Range::Normalized(_) => self.original.get(self.convert_offsets(range)?), } } /// Validate the given range, to make sure it is on char boundaries fn validate_range<T: 
RangeBounds<usize> + Clone>( &self, range: Range<T>, ) -> Option<Range<std::ops::Range<usize>>> { match range { Range::Original(_) => { let r = range.into_full_range(self.original.len()); if !(self.original.is_char_boundary(r.start) && self.original.is_char_boundary(r.end)) { None } else { Some(Range::Original(r)) } } Range::Normalized(_) => { let r = range.into_full_range(self.normalized.len()); if !(self.normalized.is_char_boundary(r.start) && self.normalized.is_char_boundary(r.end)) { None } else { Some(Range::Normalized(r)) } } } } /// Return a slice of the current NormalizedString /// If the range is not on char boundaries, return None pub fn slice<T>(&self, range: Range<T>) -> Option<NormalizedString> where T: RangeBounds<usize> + Clone, { let full_range = self.validate_range(range)?; let (normalized_range, original_range) = match full_range { Range::Original(_) => ( self.convert_offsets(full_range.clone())?, full_range.clone().unwrap(), ), Range::Normalized(_) => ( full_range.clone().unwrap(), self.convert_offsets(full_range.clone())?, ), }; let n_shift = original_range.start; Some(Self { original: self .get_range_original(full_range.clone()) .unwrap_or_default() .into(), normalized: self.get_range(full_range).unwrap_or_default().into(), alignments: self .alignments .get(normalized_range)? .to_vec() .iter() .map(|(start, end)| (start - n_shift, end - n_shift)) .collect(), original_shift: self.original_shift + original_range.start, }) } /// Applies transformations to the current normalized version of the string, /// while updating the alignments. /// This method expect an Iterator yielding each char of the new normalized string /// with a `change` isize equals to: /// - `1` if this is a new char /// - `-N` if the char is right before N removed chars /// - `0` if the char is replacing the existing one /// Since it is possible that the normalized string doesn't include some of the characters at /// the beginning of the original one, we need an `initial_offset` which represents the number /// of removed chars at the very beginning. pub fn transform_range<T, I>(&mut self, range: Range<T>, dest: I, initial_offset: usize) where T: RangeBounds<usize> + Clone, I: IntoIterator<Item = (char, isize)>, { let n_range = match range { Range::Normalized(_) => range.into_full_range(self.len()), Range::Original(_) => match self.convert_offsets(range) { Some(range) => range, None => return, }, }; trace!( "===== transform_range call with {:?} (initial_offset: {}) =====", n_range, initial_offset ); // Retrieve the original characters that are being replaced. This let us // compute the change in byte sizes along the way. 
let mut replaced_normalized = self.normalized[n_range.clone()] .chars() .collect::<Vec<_>>() .into_iter(); let initial_removed: usize = (&mut replaced_normalized) .take(initial_offset) .map(|c| c.len_utf8()) .sum(); let mut offset = (initial_removed + n_range.start) as isize; let mut alignments = Vec::with_capacity(n_range.len()); trace!("=> Applying transformations"); let normalized = dest .into_iter() .map(|(c, changes)| { trace!( "### {:?} with size {}: {} with offset {} ###", c, c.len_utf8(), match changes { 0 => "Replacing".into(), ch if ch > 0 => "Adding".into(), ch if ch < 0 => format!("Replacing + removing {} following chars", ch), _ => "Undefined".into(), }, offset ); let idx = offset as usize; let align = if changes.is_positive() { if idx < 1 { (0, 0) } else { // This is a newly inserted character, so it shares the same alignment // than the previous one self.alignments[idx - 1] } } else { self.alignments[idx] }; // If we are replacing a character, find it and compute the change in size let replaced_char = if !changes.is_positive() { replaced_normalized.next() } else { None }; let replaced_char_size = replaced_char.map_or(0, |c| c.len_utf8()); let replaced_char_size_change = c.len_utf8() as isize - replaced_char_size as isize; if let Some(ref replaced_char) = replaced_char { trace!( "Replacing char {:?} - with a change in size: {}", replaced_char, replaced_char_size_change ); } // If we are removing some characters, find them too let total_bytes_to_remove = if changes.is_negative() { (&mut replaced_normalized) .take(-changes as usize) .map(|c| c.len_utf8()) .sum() } else { 0 }; trace!("Total bytes to remove: {}", total_bytes_to_remove); // Keep track of the changes for next offsets offset += replaced_char_size as isize; offset += total_bytes_to_remove as isize; trace!("New offset: {}", offset); trace!("New normalized alignment: {}x {:?}", c.len_utf8(), align); alignments.extend((0..c.len_utf8()).map(|_| align)); // Then we keep only the char for string reconstruction c }) .collect::<String>(); self.alignments.splice(n_range.clone(), alignments); unsafe { self.normalized .as_mut_vec() .splice(n_range, normalized.bytes()); } } /// Applies transformations to the current normalized version of the string, /// while updating the alignments. /// This method expect an Iterator yielding each char of the new normalized string /// with a `change` isize equals to: /// - `1` if this is a new char /// - `-N` if the char is right before N removed chars /// - `0` if the char is replacing the existing one /// Since it is possible that the normalized string doesn't include some of the characters at /// the beginning of the original one, we need an `initial_offset` which represents the number /// of removed chars at the very beginning. 
pub fn transform<I>(&mut self, dest: I, initial_offset: usize) where I: IntoIterator<Item = (char, isize)>, { self.transform_range(Range::Original(..), dest, initial_offset) } /// Applies NFD normalization pub fn nfd(&mut self) -> &mut Self { self.transform(self.get().to_owned().nfd(), 0); self } /// Applies NFKD normalization pub fn nfkd(&mut self) -> &mut Self { self.transform(self.get().to_owned().nfkd(), 0); self } /// Applies NFC normalization pub fn nfc(&mut self) -> &mut Self { self.transform(self.get().to_owned().nfc(), 0); self } /// Applies NFKC normalization pub fn nfkc(&mut self) -> &mut Self { self.transform(self.get().to_owned().nfkc(), 0); self } /// Applies filtering over our characters pub fn filter<F: Fn(char) -> bool>(&mut self, keep: F) -> &mut Self { let mut removed: isize = 0; let mut removed_start: usize = 0; let mut transforms = Vec::with_capacity(self.normalized.len()); let mut last_c = None; for c in self.normalized.chars() { if keep(c) { match last_c { Some(lc) => { transforms.push((lc, -removed)); } None => { removed_start = removed as usize; } } last_c = Some(c); removed = 0; } else { removed += 1; } } if let Some(lc) = last_c { transforms.push((lc, -removed)); } self.transform(transforms, removed_start); self } /// Prepend the given string to ourself pub fn prepend(&mut self, s: &str) -> &mut Self { if let Some(next) = self.normalized.chars().next() { let transformations = s .chars() .enumerate() .map(|(i, c)| (c, isize::from(i != 0))) .chain(std::iter::once((next, 1))); self.transform_range(Range::Normalized(0..next.len_utf8()), transformations, 0); } self } /// Append the given string to ourself pub fn append(&mut self, s: &str) -> &mut Self { if let Some((b, prev)) = self.normalized.char_indices().last() { let transformations = std::iter::once((prev, 0)).chain(s.chars().map(|c| (c, 1))); self.transform_range(Range::Normalized(b..), transformations, 0); } self } /// Map our characters pub fn map<F: Fn(char) -> char>(&mut self, map: F) -> &mut Self { let transformations = self .normalized .chars() .map(|c| (map(c), 0)) .collect::<Vec<_>>(); self.transform(transformations, 0); self } /// Calls the given function for each characters pub fn for_each<F: FnMut(char)>(&self, foreach: F) -> &Self { self.normalized.chars().for_each(foreach); self } /// Lowercase pub fn lowercase(&mut self) -> &mut Self { let mut new_chars: Vec<(char, isize)> = vec![]; self.for_each(|c| { c.to_lowercase().enumerate().for_each(|(index, c)| { new_chars.push((c, isize::from(index > 0))); }) }); self.transform(new_chars.into_iter(), 0); self } /// Uppercase pub fn uppercase(&mut self) -> &mut Self { let mut new_chars: Vec<(char, isize)> = vec![]; self.for_each(|c| { c.to_uppercase().enumerate().for_each(|(index, c)| { new_chars.push((c, isize::from(index > 0))); }) }); self.transform(new_chars.into_iter(), 0); self } /// Replace anything that matches the pattern with the given content. pub fn replace<P: Pattern>(&mut self, pattern: P, content: &str) -> Result<()> { let mut offset: isize = 0; pattern .find_matches(&self.normalized)? 
.into_iter() .for_each(|((start, end), is_match)| { if is_match { let mut range = start..end; apply_signed!(range.start, offset); apply_signed!(range.end, offset); let mut new_len = 0; let removed_chars = self.normalized[range.clone()].chars().count(); self.transform_range( Range::Normalized(range), content.chars().map(|c| { new_len += c.len_utf8(); (c, 1) }), removed_chars, ); let old_len = end - start; offset += new_len as isize - old_len as isize; } }); Ok(()) } /// Clear the normalized part of the string pub fn clear(&mut self) -> usize { let len = self.len(); self.transform(std::iter::empty(), len); len } /// Split the current string in many subparts. Specify what to do with the /// delimiter. /// /// ## Splitting Behavior for the delimiter /// /// The behavior can be one of the followings: /// When splitting on `'-'` for example, with input `the-final--countdown`: /// - Removed => `[ "the", "", "final", "", "", "countdown" ]` /// - Isolated => `[ "the", "-", "final", "-", "-", "countdown" ]` /// - MergedWithPrevious => `[ "the-", "final-", "-", "countdown" ]` /// - MergedWithNext => `[ "the", "-final", "-", "-countdown" ]` pub fn split<P: Pattern>( &self, pattern: P, behavior: SplitDelimiterBehavior, ) -> Result<Vec<NormalizedString>> { let matches = pattern.find_matches(&self.normalized)?; // Process the matches according to the selected behavior: Vec<(Offsets, should_remove)> use SplitDelimiterBehavior::*; let splits = match behavior { Isolated => matches .into_iter() .map(|(offsets, _)| (offsets, false)) .collect(), Removed => matches, Contiguous => { let mut previous_match = false; matches .into_iter() .fold(vec![], |mut acc, (offsets, is_match)| { if is_match == previous_match { if let Some(((_, end), _)) = acc.last_mut() { *end = offsets.1; } else { acc.push((offsets, false)); } } else { acc.push((offsets, false)); } previous_match = is_match; acc }) } MergedWithPrevious => { let mut previous_match = false; matches .into_iter() .fold(vec![], |mut acc, (offsets, is_match)| { if is_match && !previous_match { if let Some(((_, end), _)) = acc.last_mut() { *end = offsets.1; } else { acc.push((offsets, false)); } } else { acc.push((offsets, false)); } previous_match = is_match; acc }) } MergedWithNext => { let mut previous_match = false; let mut matches = matches .into_iter() .rev() .fold(vec![], |mut acc, (offsets, is_match)| { if is_match && !previous_match { if let Some(((start, _), _)) = acc.last_mut() { *start = offsets.0; } else { acc.push((offsets, false)); } } else { acc.push((offsets, false)); } previous_match = is_match; acc }); matches.reverse(); matches } }; // Then we split according to the computed splits Ok(splits .into_iter() .filter_map(|(offsets, remove)| { if !remove { Some( self.slice(Range::Normalized(offsets.0..offsets.1)) .expect("NormalizedString bad split"), ) } else { None } }) .collect()) } /// Remove any leading space(s) of the normalized string pub fn lstrip(&mut self) -> &mut Self { self.lrstrip(true, false) } /// Remove any trailing space(s) of the normalized string pub fn rstrip(&mut self) -> &mut Self { self.lrstrip(false, true) } /// Remove any leading and trailing space(s) of the normalized string pub fn strip(&mut self) -> &mut Self { self.lrstrip(true, true) } fn lrstrip(&mut self, left: bool, right: bool) -> &mut Self { let leading_spaces = if left { self.get().chars().take_while(|c| c.is_whitespace()).count() } else { 0 }; let trailing_spaces = if right { self.get() .chars() .rev() .take_while(|c| c.is_whitespace()) .count() } else { 0 }; if 
leading_spaces > 0 || trailing_spaces > 0 { let count = self.get().chars().count(); let transformation = self .normalized .chars() .enumerate() .filter_map(|(i, c)| { if i < leading_spaces || i >= count - trailing_spaces { None } else if i == self.len() - trailing_spaces - 1 { Some((c, -(trailing_spaces as isize))) } else { Some((c, 0)) } }) .collect::<Vec<_>>(); self.transform(transformation, leading_spaces); } self } /// Returns the length of the normalized string (counting chars not bytes) pub fn len(&self) -> usize { self.normalized.len() } /// Returns the length of the original string (counting chars not bytes) pub fn len_original(&self) -> usize { self.original.len() } /// Whether empty pub fn is_empty(&self) -> bool { self.normalized.is_empty() } /// Recalculate original alignments #[allow(dead_code)] pub(crate) fn alignments_original(&self) -> Vec<(usize, usize)> { // Start, end are in alignments // offset, length are in alignments_original let mut alignments_original = Vec::with_capacity(self.original.len()); // Eventual gap before first group let start = self.alignments[0].0; if start != 0 { alignments_original.extend(vec![(0, 0); start]); } let mut last = (&self.alignments[0].0, &self.alignments[0].1); let mut offset = 0; let mut length = 0; for (start, end) in &self.alignments { if last == (start, end) { // This is the same group length += 1; } else { // This is a new group if start < last.1 { panic!("We can't have overlapping ranges."); } // Add the old group alignments_original.extend(vec![(offset, offset + length); last.1 - last.0]); offset += length; length = 1; // Eventual gap between the 2 groups alignments_original.extend(vec![(offset, offset); start - last.1]); } last = (start, end); } // Add the last group alignments_original.extend(vec![(offset, offset + length); last.1 - last.0]); // Add eventual last gap offset += length; alignments_original.extend(vec![ (offset, offset); self.original.len() - alignments_original.len() ]); // assert_eq!(alignments_original.len(), self.original.len()); alignments_original } } /// Returns the range covered by a slice of alignments fn expand_alignments(alignments: &[(usize, usize)]) -> Option<std::ops::Range<usize>> { if alignments.is_empty() { None } else { let start = alignments[0].0; let end = alignments[alignments.len() - 1].1; Some(start..end) } } /// Returns a range of the given string slice, by indexing chars instead of bytes pub fn get_range_of<T: RangeBounds<usize>>(s: &str, range: T) -> Option<&str> { let len = s.chars().count(); let start = match range.start_bound() { Bound::Unbounded => 0, Bound::Included(i) => *i, Bound::Excluded(i) => *i + 1, }; let end = match range.end_bound() { Bound::Unbounded => len, Bound::Included(i) => *i + 1, Bound::Excluded(i) => *i, }; if start == 0 && end == 0 { Some(&s[0..0]) } else if start >= len || end > len || start >= end { None } else { let start_b = s.char_indices().map(|(i, _)| i).nth(start).unwrap_or(0); let end_b = s.char_indices().map(|(i, _)| i).nth(end).unwrap_or(s.len()); Some(&s[start_b..end_b]) } } /// Convert the given range from bytes to char pub fn bytes_to_char(s: &str, range: std::ops::Range<usize>) -> Option<std::ops::Range<usize>> { let (mut start, mut end) = if range == (0..0) { (Some(0), Some(0)) } else { (None, None) }; s.char_indices() .enumerate() .take_while(|(_, (b, _))| *b <= range.end) .filter(|(_, (b, _))| *b >= range.start) .for_each(|(i, (b, c))| { if b == range.start { start = Some(i); } if b == range.end { end = Some(i); } if b + c.len_utf8() == range.end 
{ end = Some(i + 1); } }); Some(start?..end?) } /// Convert the given range from char to bytes pub fn char_to_bytes(s: &str, range: std::ops::Range<usize>) -> Option<std::ops::Range<usize>> { let (mut start, mut end) = if range == (0..0) { (Some(0), Some(0)) } else { (None, None) }; if range.start == range.end { s.char_indices() .skip(range.start) .take(1) .for_each(|(b, _)| { start = Some(b); end = Some(b); }); } else { s.char_indices() .skip(range.start) .take(range.end - range.start) .for_each(|(b, c)| { if start.is_none() { start = Some(b); } end = Some(b + c.len_utf8()); }); } Some(start?..end?) } impl From<String> for NormalizedString { fn from(s: String) -> Self { let alignments = s .char_indices() .flat_map(|(b, c)| { let len = c.len_utf8(); (0..len).map(move |_| (b, b + len)) }) .collect::<Vec<_>>(); Self { original: s.clone(), normalized: s, alignments, original_shift: 0, } } } impl From<&str> for NormalizedString { fn from(s: &str) -> Self { Self::from(s.to_owned()) } } #[cfg(test)] mod tests { use super::*; use regex::Regex; use unicode_categories::UnicodeCategories; #[test] fn nfd_adds_new_chars() { let mut n = NormalizedString::from("élégant"); n.nfd(); assert_eq!( &n.alignments, &[ (0, 2), (0, 2), (0, 2), (2, 3), (3, 5), (3, 5), (3, 5), (5, 6), (6, 7), (7, 8), (8, 9) ] ); assert_eq!( n.alignments_original(), vec![ (0, 3), (0, 3), (3, 4), (4, 7), (4, 7), (7, 8), (8, 9), (9, 10), (10, 11) ] ); } #[test] fn remove_chars_added_by_nfd() { let mut n = NormalizedString::from("élégant"); n.nfd().filter(|c| !c.is_mark_nonspacing()); assert_eq!(n.get(), "elegant"); assert_eq!( &n.alignments, &[(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 9)] ); assert_eq!( n.alignments_original(), vec![ (0, 1), (0, 1), (1, 2), (2, 3), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7) ] ); } #[test] fn remove_chars() { let mut n = NormalizedString::from("élégant"); n.filter(|c| c != 'n'); assert_eq!(n.get(), "élégat"); assert_eq!( &n.alignments, &[ (0, 2), (0, 2), (2, 3), (3, 5), (3, 5), (5, 6), (6, 7), // Skipped range (8, 9) ] ); assert_eq!( n.alignments_original(), vec![ (0, 2), (0, 2), (2, 3), (3, 5), (3, 5), (5, 6), (6, 7), (7, 7), // Eaten n (7, 8) ] ); } #[test] fn mixed_addition_and_removal() { let mut n = NormalizedString::from("élégant"); n.nfd().filter(|c| !c.is_mark_nonspacing() && c != 'n'); assert_eq!(n.get(), "elegat"); assert_eq!( &n.alignments, &[(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (8, 9)] ); assert_eq!( n.alignments_original(), vec![ (0, 1), (0, 1), (1, 2), (2, 3), (2, 3), (3, 4), // g (4, 5), // a (5, 5), // Eaten n (5, 6) ] ); } #[test] fn range_conversion() { let mut n = NormalizedString::from(" __Hello__ "); n.filter(|c| !c.is_whitespace()).lowercase(); let hello_n = n.convert_offsets(Range::Original(6..11)); assert_eq!(hello_n, Some(2..7)); assert_eq!( n.get_range(Range::Normalized(hello_n.clone().unwrap())), Some("hello") ); assert_eq!( n.get_range_original(Range::Normalized(hello_n.unwrap())), Some("Hello") ); assert_eq!(n.get_range(Range::Original(6..11)), Some("hello")); assert_eq!(n.get_range_original(Range::Original(6..11)), Some("Hello")); // Make sure we get None only in specific cases assert_eq!(n.convert_offsets(Range::Original(0..0)), Some(0..0)); assert_eq!(n.convert_offsets(Range::Original(3..3)), Some(3..3)); assert_eq!(n.convert_offsets(Range::Original(15..)), Some(9..9)); assert_eq!(n.convert_offsets(Range::Original(16..)), Some(16..16)); assert_eq!(n.convert_offsets(Range::Original(17..)), None); assert_eq!(n.convert_offsets(Range::Normalized(0..0)), Some(0..0)); 
assert_eq!(n.convert_offsets(Range::Normalized(3..3)), Some(3..3)); assert_eq!(n.convert_offsets(Range::Normalized(9..)), Some(9..9)); assert_eq!(n.convert_offsets(Range::Normalized(10..)), None); } #[test] fn original_range() { let mut n = NormalizedString::from("Hello_______ World!"); n.filter(|c| c != '_').lowercase(); let world_n = n.get_range(Range::Normalized(6..11)).unwrap(); let world_o = n.get_range_original(Range::Normalized(6..11)).unwrap(); assert_eq!(world_n, "world"); assert_eq!(world_o, "World"); let original_range = Range::Original(n.convert_offsets(Range::Normalized(6..11)).unwrap()); assert_eq!(n.get_range(original_range.clone()).unwrap(), "world"); assert_eq!( n.get_range_original(original_range.clone()).unwrap(), "World" ); assert_eq!(original_range.into_full_range(n.len_original()), 13..18); } #[test] fn added_around_edges() { let mut n = NormalizedString::from("Hello"); n.transform( vec![ (' ', 1), ('H', 0), ('e', 0), ('l', 0), ('l', 0), ('o', 0), (' ', 1), ] .into_iter(), 0, ); assert_eq!(&n.normalized, " Hello "); assert_eq!( n.get_range_original(Range::Normalized(1..n.normalized.len() - 1)), Some("Hello") ); } #[test] fn added_characters_alignment() { let mut n = NormalizedString::from("野口 No"); n.transform( n.get().to_owned().chars().flat_map(|c| { if (c as usize) > 0x4E00 { vec![(' ', 0), (c, 1), (' ', 1)] } else { vec![(c, 0)] } }), 0, ); assert_eq!( n, NormalizedString { original: "野口 No".into(), normalized: " 野 口 No".into(), alignments: vec![ (0, 3), (0, 3), (0, 3), (0, 3), (0, 3), (3, 6), (3, 6), (3, 6), (3, 6), (3, 6), (6, 7), (7, 8), (8, 9) ], original_shift: 0 } ); assert_eq!( n.alignments_original(), vec![ (0, 5), (0, 5), (0, 5), (5, 10), (5, 10), (5, 10), (10, 11), (11, 12), (12, 13) ] ); } #[test] fn remove_at_beginning() { let mut n = NormalizedString::from(" Hello"); n.filter(|c| !c.is_whitespace()); assert_eq!( n.get_range_original(Range::Normalized(1.."Hello".len())), Some("ello") ); assert_eq!( n.get_range_original(Range::Normalized(0..n.normalized.len())), Some("Hello") ); } #[test] fn remove_at_end() { let mut n = NormalizedString::from("Hello "); n.filter(|c| !c.is_whitespace()); assert_eq!(n.get_range_original(Range::Normalized(0..4)), Some("Hell")); assert_eq!( n.get_range_original(Range::Normalized(0..n.normalized.len())), Some("Hello") ); } #[test] fn removed_around_both_edges() { let mut n = NormalizedString::from(" Hello "); n.filter(|c| !c.is_whitespace()); assert_eq!(&n.normalized, "Hello"); assert_eq!( n.get_range_original(Range::Normalized(0.."Hello".len())), Some("Hello") ); assert_eq!( n.get_range_original(Range::Normalized(1.."Hell".len())), Some("ell") ); } #[test] fn lstrip() { let mut n = NormalizedString::from(" This is an example "); n.lstrip(); assert_eq!(&n.normalized, "This is an example "); assert_eq!( n.get_range_original(Range::Normalized(0..n.normalized.len())), Some("This is an example ") ); } #[test] fn rstrip() { let mut n = NormalizedString::from(" This is an example "); n.rstrip(); assert_eq!(&n.normalized, " This is an example"); assert_eq!( n.get_range_original(Range::Normalized(0..n.normalized.len())), Some(" This is an example") ); } #[test] fn strip() { let mut n = NormalizedString::from(" This is an example "); n.strip(); assert_eq!(&n.normalized, "This is an example"); assert_eq!( n.get_range_original(Range::Normalized(0..n.normalized.len())), Some("This is an example") ); } #[test] fn strip_unicode() { let mut n = NormalizedString::from(" 你好asa \n"); n.strip(); assert_eq!(&n.normalized, "你好asa"); assert_eq!( 
n.get_range_original(Range::Normalized(0..n.normalized.len())), Some("你好asa") ); } #[test] fn prepend() { let mut n = NormalizedString::from("there"); n.prepend("Hey "); assert_eq!(&n.normalized, "Hey there"); assert_eq!( n.alignments, vec![ (0, 1), (0, 1), (0, 1), (0, 1), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5) ] ); assert_eq!(n.convert_offsets(Range::Normalized(0..4)), Some(0..1)); } #[test] fn append() { let mut n = NormalizedString::from("Hey"); n.append(" there"); assert_eq!(&n.normalized, "Hey there"); assert_eq!( n.alignments, vec![ (0, 1), (1, 2), (2, 3), (2, 3), (2, 3), (2, 3), (2, 3), (2, 3), (2, 3) ] ); assert_eq!( n.convert_offsets(Range::Normalized(3.." there".len())), Some(2..3) ); } #[test] fn get_range() { let s = String::from("Hello my name is John 👋"); assert_eq!(get_range_of(&s, ..), Some(&s[..])); assert_eq!(get_range_of(&s, 17..), Some("John 👋")); } #[test] fn slice() { let mut s = NormalizedString::from("𝔾𝕠𝕠𝕕 𝕞𝕠𝕣𝕟𝕚𝕟𝕘"); s.nfkc(); let original_slice = s.slice(Range::Original(0..4)).unwrap(); assert_eq!(original_slice.get(), "G"); assert_eq!(original_slice.get_original(), "𝔾"); let normalized_slice = s.slice(Range::Normalized(0..4)).unwrap(); assert_eq!(normalized_slice.get(), "Good"); assert_eq!(normalized_slice.get_original(), "𝔾𝕠𝕠𝕕"); // Make sure the sliced NormalizedString is still aligned as expected let mut s = NormalizedString::from(" Good Morning! "); s.strip(); // If we keep the whole slice let slice = s.slice(Range::Original(..)).unwrap(); assert_eq!( slice.get_range_original(Range::Normalized(0..4)), Some("Good") ); let slice = s.slice(Range::Normalized(..)).unwrap(); assert_eq!( slice.get_range_original(Range::Normalized(0..4)), Some("Good") ); // If we keep after the modified piece let slice = s.slice(Range::Original(4..15)).unwrap(); assert_eq!( slice.get_range_original(Range::Normalized(0..3)), Some("ood") ); // If we keep only the modified piece let slice = s.slice(Range::Original(3..16)).unwrap(); assert_eq!( slice.get_range_original(Range::Normalized(0..4)), Some("Good") ); } #[test] fn replace() { // Simple let mut s = NormalizedString::from(" Hello friend "); s.replace(' ', "_").unwrap(); assert_eq!(s.get(), "_Hello___friend_"); let mut s = NormalizedString::from("aaaab"); s.replace('a', "b").unwrap(); assert_eq!(s.get(), "bbbbb"); // Overlapping let mut s = NormalizedString::from("aaaab"); s.replace("aaa", "b").unwrap(); assert_eq!(s.get(), "bab"); // Regex let mut s = NormalizedString::from(" Hello friend "); let re = Regex::new(r"\s+").unwrap(); s.replace(&re, "_").unwrap(); assert_eq!(s.get(), "_Hello_friend_"); } #[test] fn split() { use SplitDelimiterBehavior::*; let s = NormalizedString::from("The-final--countdown"); let test = |behavior: SplitDelimiterBehavior, result: Vec<&str>| { let splits = s.split('-', behavior).unwrap(); assert_eq!(splits.iter().map(|n| n.get()).collect::<Vec<_>>(), result); }; test(Removed, vec!["The", "final", "countdown"]); test(Isolated, vec!["The", "-", "final", "-", "-", "countdown"]); test(MergedWithPrevious, vec!["The-", "final-", "-", "countdown"]); test(MergedWithNext, vec!["The", "-final", "-", "-countdown"]); test(Contiguous, vec!["The", "-", "final", "--", "countdown"]); } #[test] fn transform_range_single_bytes() { let s = NormalizedString::from("Hello friend"); // Removing at the beginning let mut current = s.clone(); current.transform_range(Range::Original(0..4), vec![('Y', 0)], 3); assert_eq!( current, NormalizedString { original: "Hello friend".into(), normalized: "Yo friend".into(), alignments: vec![ 
(3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12) ], original_shift: 0, } ); assert_eq!( current.alignments_original(), vec![ (0, 0), (0, 0), (0, 0), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9) ] ); // Removing in the middle let mut current = s.clone(); current.transform_range( Range::Original(3..10), vec![('_', 0), ('F', 0), ('R', -2)], 2, ); assert_eq!( current, NormalizedString { original: "Hello friend".into(), normalized: "Hel_FRnd".into(), alignments: vec![ (0, 1), (1, 2), (2, 3), (5, 6), (6, 7), (7, 8), (10, 11), (11, 12) ], original_shift: 0, } ); assert_eq!( current.alignments_original(), vec![ (0, 1), (1, 2), (2, 3), (3, 3), (3, 3), (3, 4), (4, 5), (5, 6), (6, 6), (6, 6), (6, 7), (7, 8) ] ); // Removing at the end let mut current = s.clone(); current.transform_range(Range::Original(5..), vec![('_', 0), ('F', -5)], 0); assert_eq!( current, NormalizedString { original: "Hello friend".into(), normalized: "Hello_F".into(), alignments: vec![(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7)], original_shift: 0, } ); assert_eq!( current.alignments_original(), vec![ (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 7), (7, 7), (7, 7), (7, 7), (7, 7) ] ); // Adding at the beginning let mut current = s.clone(); current.transform_range(Range::Original(0..1), vec![('H', 1), ('H', 0)], 0); assert_eq!( current, NormalizedString { original: "Hello friend".into(), normalized: "HHello friend".into(), alignments: vec![ (0, 0), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12) ], original_shift: 0, } ); assert_eq!( current.alignments_original(), vec![ (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12), (12, 13) ] ); // Equivalent to the previous one let mut current = s.clone(); current.transform_range(Range::Original(0..0), vec![('H', 1)], 0); assert_eq!( current, NormalizedString { original: "Hello friend".into(), normalized: "HHello friend".into(), alignments: vec![ (0, 0), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12) ], original_shift: 0, } ); assert_eq!( current.alignments_original(), vec![ (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12), (12, 13) ] ); // Adding as part of the first character let mut current = s.clone(); current.transform_range(Range::Original(0..1), vec![('H', 0), ('H', 1)], 0); assert_eq!( current, NormalizedString { original: "Hello friend".into(), normalized: "HHello friend".into(), alignments: vec![ (0, 1), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12) ], original_shift: 0, } ); assert_eq!( current.alignments_original(), vec![ (0, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12), (12, 13) ] ); // Adding in the middle let mut current = s.clone(); current.transform_range( Range::Original(5..6), vec![('_', 0), ('m', 1), ('y', 1), ('_', 1)], 0, ); assert_eq!( current, NormalizedString { original: "Hello friend".into(), normalized: "Hello_my_friend".into(), alignments: vec![ (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (5, 6), (5, 6), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12) ], original_shift: 0, } ); assert_eq!( current.alignments_original(), vec![ (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 9), (9, 10), (10, 11), (11, 12), (12, 13), (13, 14), (14, 15) ] ); // Adding at the end let mut current = s; 
current.transform_range(Range::Original(11..), vec![('d', 0), ('_', 1), ('!', 1)], 0); assert_eq!( current, NormalizedString { original: "Hello friend".into(), normalized: "Hello friend_!".into(), alignments: vec![ (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12), (11, 12), (11, 12) ], original_shift: 0, } ); assert_eq!( current.alignments_original(), vec![ (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 14) ] ); } #[test] fn transform_range_multiple_bytes() { let s = NormalizedString::from("𝔾𝕠𝕠𝕕"); // Removing at the beginning let mut current = s.clone(); current.transform_range(Range::Original(0..8), vec![('G', -1)], 0); assert_eq!( current, NormalizedString { original: "𝔾𝕠𝕠𝕕".into(), normalized: "G𝕠𝕕".into(), alignments: vec![ (0, 4), (8, 12), (8, 12), (8, 12), (8, 12), (12, 16), (12, 16), (12, 16), (12, 16) ], original_shift: 0, } ); assert_eq!( current.alignments_original(), vec![ (0, 1), (0, 1), (0, 1), (0, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 5), (1, 5), (1, 5), (1, 5), (5, 9), (5, 9), (5, 9), (5, 9) ] ); assert_eq!(current.get_range(Range::Original(0..8)).unwrap(), "G"); assert_eq!(current.get_range(Range::Original(0..4)).unwrap(), "G"); assert_eq!( current.get_range_original(Range::Original(0..4)).unwrap(), "𝔾" ); assert_eq!( current.get_range_original(Range::Original(0..8)).unwrap(), "𝔾𝕠" ); // Removing in the middle let mut current = s.clone(); current.transform_range(Range::Original(4..12), vec![('o', -1)], 0); assert_eq!( current, NormalizedString { original: "𝔾𝕠𝕠𝕕".into(), normalized: "𝔾o𝕕".into(), alignments: vec![ (0, 4), (0, 4), (0, 4), (0, 4), (4, 8), (12, 16), (12, 16), (12, 16), (12, 16) ], original_shift: 0, } ); assert_eq!( current.alignments_original(), vec![ (0, 4), (0, 4), (0, 4), (0, 4), (4, 5), (4, 5), (4, 5), (4, 5), (5, 5), (5, 5), (5, 5), (5, 5), (5, 9), (5, 9), (5, 9), (5, 9) ] ); // Removing at the end let mut current = s.clone(); current.transform_range(Range::Original(12..), vec![('d', 0), ('!', 1)], 0); assert_eq!( current, NormalizedString { original: "𝔾𝕠𝕠𝕕".into(), normalized: "𝔾𝕠𝕠d!".into(), alignments: vec![ (0, 4), (0, 4), (0, 4), (0, 4), (4, 8), (4, 8), (4, 8), (4, 8), (8, 12), (8, 12), (8, 12), (8, 12), (12, 16), (12, 16) ], original_shift: 0, } ); // Adding at the beginning let mut current = s.clone(); current.transform_range(Range::Original(0..4), vec![('_', 1), ('𝔾', 0)], 0); assert_eq!( current, NormalizedString { original: "𝔾𝕠𝕠𝕕".into(), normalized: "_𝔾𝕠𝕠𝕕".into(), alignments: vec![ (0, 0), (0, 4), (0, 4), (0, 4), (0, 4), (4, 8), (4, 8), (4, 8), (4, 8), (8, 12), (8, 12), (8, 12), (8, 12), (12, 16), (12, 16), (12, 16), (12, 16) ], original_shift: 0, } ); assert_eq!( current.alignments_original(), vec![ (1, 5), (1, 5), (1, 5), (1, 5), (5, 9), (5, 9), (5, 9), (5, 9), (9, 13), (9, 13), (9, 13), (9, 13), (13, 17), (13, 17), (13, 17), (13, 17) ] ); assert_eq!(current.get_range(Range::Original(0..8)).unwrap(), "𝔾𝕠"); assert_eq!(current.get_range(Range::Original(0..4)).unwrap(), "𝔾"); assert_eq!( current.get_range_original(Range::Original(0..4)).unwrap(), "𝔾" ); assert_eq!( current.get_range_original(Range::Original(0..8)).unwrap(), "𝔾𝕠" ); // Equivalent to the previous one let mut current = s.clone(); current.transform_range(Range::Original(0..0), vec![('_', 1)], 0); assert_eq!( current, NormalizedString { original: "𝔾𝕠𝕠𝕕".into(), normalized: "_𝔾𝕠𝕠𝕕".into(), alignments: vec![ (0, 0), (0, 4), (0, 4), (0, 4), (0, 4), (4, 8), (4, 8), (4, 8), (4, 8), (8, 
12), (8, 12), (8, 12), (8, 12), (12, 16), (12, 16), (12, 16), (12, 16) ], original_shift: 0, } ); assert_eq!( current.alignments_original(), vec![ (1, 5), (1, 5), (1, 5), (1, 5), (5, 9), (5, 9), (5, 9), (5, 9), (9, 13), (9, 13), (9, 13), (9, 13), (13, 17), (13, 17), (13, 17), (13, 17) ] ); assert_eq!(current.get_range(Range::Original(0..8)).unwrap(), "𝔾𝕠"); assert_eq!(current.get_range(Range::Original(0..4)).unwrap(), "𝔾"); assert_eq!( current.get_range_original(Range::Original(0..4)).unwrap(), "𝔾" ); assert_eq!( current.get_range_original(Range::Original(0..8)).unwrap(), "𝔾𝕠" ); // Adding as part of the first character let mut current = s.clone(); current.transform_range(Range::Original(0..4), vec![('𝔾', 0), ('o', 1)], 0); assert_eq!( current, NormalizedString { original: "𝔾𝕠𝕠𝕕".into(), normalized: "𝔾o𝕠𝕠𝕕".into(), alignments: vec![ (0, 4), (0, 4), (0, 4), (0, 4), (0, 4), (4, 8), (4, 8), (4, 8), (4, 8), (8, 12), (8, 12), (8, 12), (8, 12), (12, 16), (12, 16), (12, 16), (12, 16) ], original_shift: 0, } ); assert_eq!( current.alignments_original(), vec![ (0, 5), (0, 5), (0, 5), (0, 5), (5, 9), (5, 9), (5, 9), (5, 9), (9, 13), (9, 13), (9, 13), (9, 13), (13, 17), (13, 17), (13, 17), (13, 17) ] ); assert_eq!(current.get_range(Range::Original(0..8)).unwrap(), "𝔾o𝕠"); assert_eq!(current.get_range(Range::Original(0..4)).unwrap(), "𝔾o"); assert_eq!( current.get_range_original(Range::Original(0..4)).unwrap(), "𝔾" ); assert_eq!( current.get_range_original(Range::Original(0..8)).unwrap(), "𝔾𝕠" ); // Adding in the middle let mut current = s.clone(); current.transform_range( Range::Original(4..8), vec![('𝕠', 0), ('o', 1), ('o', 1), ('o', 1)], 0, ); assert_eq!( current, NormalizedString { original: "𝔾𝕠𝕠𝕕".into(), normalized: "𝔾𝕠ooo𝕠𝕕".into(), alignments: vec![ (0, 4), (0, 4), (0, 4), (0, 4), (4, 8), (4, 8), (4, 8), (4, 8), (4, 8), (4, 8), (4, 8), (8, 12), (8, 12), (8, 12), (8, 12), (12, 16), (12, 16), (12, 16), (12, 16) ], original_shift: 0, } ); assert_eq!( current.alignments_original(), vec![ (0, 4), (0, 4), (0, 4), (0, 4), (4, 11), (4, 11), (4, 11), (4, 11), (11, 15), (11, 15), (11, 15), (11, 15), (15, 19), (15, 19), (15, 19), (15, 19) ] ); // Adding at the end let mut current = s; current.transform_range(Range::Original(16..), vec![('!', 1)], 0); assert_eq!( current, NormalizedString { original: "𝔾𝕠𝕠𝕕".into(), normalized: "𝔾𝕠𝕠𝕕!".into(), alignments: vec![ (0, 4), (0, 4), (0, 4), (0, 4), (4, 8), (4, 8), (4, 8), (4, 8), (8, 12), (8, 12), (8, 12), (8, 12), (12, 16), (12, 16), (12, 16), (12, 16), (12, 16) ], original_shift: 0, } ); assert_eq!( current.alignments_original(), vec![ (0, 4), (0, 4), (0, 4), (0, 4), (4, 8), (4, 8), (4, 8), (4, 8), (8, 12), (8, 12), (8, 12), (8, 12), (12, 17), (12, 17), (12, 17), (12, 17) ] ); } #[test] fn transform_check() { let mut s = NormalizedString::from("abc…"); s.nfkd(); let transforms = vec![('a', -2), ('.', 0), ('.', 0), ('.', 0)]; s.transform(transforms, 0); s.lowercase(); assert_eq!(s.get(), "a..."); } }
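// A minimal sketch of the alignment bookkeeping exercised by the tests above, assuming only
// the public `NormalizedString` and `Range` items defined in this file: after a transformation
// that removes characters, offsets taken on the normalized string can still be converted back
// to the original referential.
#[cfg(test)]
mod alignment_sketch {
    use super::*;

    #[test]
    fn offsets_survive_filtering() {
        let mut n = NormalizedString::from("Hello  World");
        // Drop every whitespace character (the two spaces in the middle).
        n.filter(|c| !c.is_whitespace());
        assert_eq!(n.get(), "HelloWorld");
        // "World" in the normalized string still maps back to its original byte range 7..12.
        assert_eq!(n.convert_offsets(Range::Normalized(5..10)), Some(7..12));
        assert_eq!(
            n.get_range_original(Range::Normalized(5..10)),
            Some("World")
        );
    }
}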
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/tokenizer/added_vocabulary.rs
use super::{ normalizer::Range, Model, NormalizedString, Normalizer, Offsets, PreTokenizedString, Token, }; use aho_corasick::{AhoCorasick, AhoCorasickBuilder, MatchKind}; use regex::Regex; use serde::{ser::SerializeSeq, Deserialize, Serialize, Serializer}; use std::collections::{HashMap, HashSet}; /// Represent a token added by the user on top of the existing Model vocabulary. /// AddedToken can be configured to specify the behavior they should have in various situations /// like: /// - Whether they should only match single words /// - Whether to include any whitespace on its left or right #[derive(Debug, Clone, Serialize, Deserialize)] pub struct AddedToken { /// The content of the added token pub content: String, /// Whether this token must be a single word or can break words pub single_word: bool, /// Whether this token should strip whitespaces on its left pub lstrip: bool, /// Whether this token should strip whitespaces on its right pub rstrip: bool, /// Whether this token should be normalized pub normalized: bool, /// Whether this token is special pub special: bool, } impl AddedToken { /// Build this token from the given content, specifying if it is intented to be a /// special token. Special tokens are not normalized by default. pub fn from<S: Into<String>>(content: S, special: bool) -> Self { Self { content: content.into(), normalized: !special, special, ..Default::default() } } /// Specify whether this token should only match on whole single words, and never /// part of a word. #[must_use] pub fn single_word(mut self, single_word: bool) -> Self { self.single_word = single_word; self } /// Specify whether this token should include all the whitespaces on its left, in /// order to strip them out. #[must_use] pub fn lstrip(mut self, lstrip: bool) -> Self { self.lstrip = lstrip; self } /// Specify whether this token should include all the whitespaces on its right, in /// order to strip them out. #[must_use] pub fn rstrip(mut self, rstrip: bool) -> Self { self.rstrip = rstrip; self } /// Specify whether this token should be normalized and match against its normalized /// version in the input text. #[must_use] pub fn normalized(mut self, normalized: bool) -> Self { self.normalized = normalized; self } } impl Default for AddedToken { fn default() -> Self { Self { content: String::new(), single_word: false, lstrip: false, rstrip: false, normalized: true, special: false, } } } // We only want to hash on the content. AddedToken cannot be added multiple times with different // options impl std::hash::Hash for AddedToken { fn hash<H: std::hash::Hasher>(&self, state: &mut H) { self.content.hash(state); } } impl std::cmp::PartialEq for AddedToken { fn eq(&self, other: &Self) -> bool { self.content == other.content } } impl std::cmp::Eq for AddedToken {} type MatchingSet = (AhoCorasick, Vec<u32>); lazy_static! 
{ static ref STARTS_WITH_WORD: Regex = Regex::new(r"^\w").unwrap(); static ref ENDS_WITH_WORD: Regex = Regex::new(r"\w$").unwrap(); static ref RIGHTMOST_SPACE_AT_START: Regex = Regex::new(r"^\s*").unwrap(); static ref LEFTMOST_SPACE_AT_END: Regex = Regex::new(r"\s*$").unwrap(); } fn ends_with_word(sentence: &str) -> bool { ENDS_WITH_WORD.is_match(sentence) } fn starts_with_word(sentence: &str) -> bool { STARTS_WITH_WORD.is_match(sentence) } fn space_leftmost_at_end(sentence: &str) -> usize { if let Some(match_) = LEFTMOST_SPACE_AT_END.find(sentence) { match_.start() } else { sentence.len() } } fn space_rightmost_at_start(sentence: &str) -> usize { if let Some(match_) = RIGHTMOST_SPACE_AT_START.find(sentence) { match_.end() } else { 0 } } /// /// A vocabulary built on top of the Model /// /// This provides a way to add new vocabulary to a Tokenizer that has already been trained, /// in a previous process, maybe by someone else. This is especially interesting in the case /// of fine-tunings, where we want to finetune a model while adding some new functionalities /// using some new special tokens, or maybe add some tokens in the case of unknown tokens, etc. /// /// One of the reasons we need to handle these tokens outside of the model is simply that /// for many models, it is not possible to add new tokens after the training process. For example, /// using BPE, the training process generates merges pairs along the vocabulary, and any token /// in the vocabulary can be decomposed in other tokens, down to the original alphabet. If we /// were to add new tokens after this training process, we couldn't make sure the merges pairs /// exist as required. /// #[derive(Clone, Debug)] pub(super) struct AddedVocabulary { /// Contains the mapping from String (token content) to ID. This map contains both special /// tokens and classic added tokens that were added to the this vocabulary. added_tokens_map: HashMap<String, u32>, /// Contains the mapping from ID to AddedToken for all the added tokens, both special /// and classic. added_tokens_map_r: HashMap<u32, AddedToken>, /// Contains only the classic AddedToken, in the specific order the user gave them. added_tokens: Vec<AddedToken>, /// Contains only the special AddedToken, in the specific order the user gave them. special_tokens: Vec<AddedToken>, /// A Set, containing all the special token for easy access while decoding. This let's /// us remove them easily with an O(1) complexity. 
special_tokens_set: HashSet<String>, /// A RegexSet containing all the non-normalized patterns used to split on AddedTokens split_trie: MatchingSet, /// A RegexSet containing all the normalized patterns used to split on AddedTokens split_normalized_trie: MatchingSet, } impl AddedVocabulary { pub fn new() -> Self { let trie = AhoCorasickBuilder::new() .match_kind(MatchKind::LeftmostLongest) .build::<_, &&[u8]>([]); let normalized_trie = AhoCorasickBuilder::new() .match_kind(MatchKind::LeftmostLongest) .build::<_, &&[u8]>([]); Self { added_tokens_map: HashMap::new(), added_tokens_map_r: HashMap::new(), added_tokens: vec![], special_tokens: vec![], special_tokens_set: HashSet::new(), split_trie: (trie, vec![]), split_normalized_trie: (normalized_trie, vec![]), } } /// Size of the additional vocabulary pub fn len(&self) -> usize { self.added_tokens_map.len() } /// Get the additional vocabulary pub fn get_vocab(&self) -> &HashMap<String, u32> { &self.added_tokens_map } /// Get the id matching one of our token if it exists pub fn token_to_id(&self, token: &str, model: &impl Model) -> Option<u32> { self.added_tokens_map .get(token) .copied() .or_else(|| model.token_to_id(token)) } /// Get the token matching the given id if it exists pub fn id_to_token(&self, id: u32, model: &impl Model) -> Option<String> { self.added_tokens_map_r .get(&id) .map(|t| t.content.clone()) .or_else(|| model.id_to_token(id)) } /// Check if a token is a special token pub fn is_special_token(&self, token: &str) -> bool { self.special_tokens_set.contains(token) } /// Add some special tokens to the vocabulary pub fn add_special_tokens<N: Normalizer>( &mut self, tokens: &[AddedToken], model: &impl Model, normalizer: Option<&N>, ) -> usize { self.add_tokens(tokens, model, normalizer) } /// Add some tokens to the vocabulary pub fn add_tokens<N: Normalizer>( &mut self, tokens: &[AddedToken], model: &impl Model, normalizer: Option<&N>, ) -> usize { // Handle special tokens (if any) for token in tokens { if token.special && !token.content.is_empty() && !self.special_tokens_set.contains(&token.content) { self.special_tokens.push(token.to_owned()); self.special_tokens_set.insert(token.content.clone()); } } // Then we delegate to `add_tokens`, that will take care of refreshing added tokens too. let mut ignored = 0; for token in tokens { if token.content.is_empty() { ignored += 1; continue; } let id = if let Some(id) = self.token_to_id(&token.content, model) { ignored += 1; id } else { let new_id = (model.get_vocab_size() + self.added_tokens_map.len()) as u32; self.added_tokens_map.insert(token.content.clone(), new_id); if !self.special_tokens_set.contains(&token.content) { self.added_tokens.push(token.clone()); } new_id }; // Update the current revert operation self.added_tokens_map_r .entry(id) .and_modify(|t| *t = token.clone()) .or_insert_with(|| token.clone()); } self.refresh_added_tokens(model, normalizer); // Return the number of added tokens tokens.len() - ignored } /// Reconstruct our internal RegexSet when new tokens are added to the vocabulary. /// /// We keep two different RegexSet, one that will take care of matching against the /// non-normalized string, and one matching against the normalized one. 
fn refresh_added_tokens<N: Normalizer>(&mut self, model: &impl Model, normalizer: Option<&N>) { type TupleTokenId<'a> = (&'a AddedToken, u32); let (normalized, non_normalized): (Vec<TupleTokenId>, Vec<TupleTokenId>) = self .special_tokens .iter() .chain(self.added_tokens.iter()) .map(|token| { ( token, self.token_to_id(&token.content, model) .expect("Missing additional token"), ) }) .partition(|(token, _)| token.normalized); let (tokens, ids): (Vec<&AddedToken>, Vec<u32>) = non_normalized.into_iter().unzip(); let trie = AhoCorasickBuilder::new() .match_kind(MatchKind::LeftmostLongest) .build(tokens.iter().map(|token| &token.content)); self.split_trie = (trie, ids); let (ntokens, nids): (Vec<&AddedToken>, Vec<u32>) = normalized.into_iter().unzip(); let patterns: Vec<_> = ntokens .iter() .map(|token| { let mut content = NormalizedString::from(token.content.as_ref()); if let Some(n) = normalizer { n.normalize(&mut content).unwrap(); } content }) .collect(); let normalized_trie = AhoCorasickBuilder::new() .match_kind(MatchKind::LeftmostLongest) .build(patterns.iter().map(|content| content.get())); self.split_normalized_trie = (normalized_trie, nids); } /// Find any AddedToken in the given sentence, using the provided MatchingSet. /// This method returns a list "splits", each of them being a pair of Offsets /// and an optional ID if it is an AddedToken. /// The list of splits cover the entire input string. fn find_matches(&self, sentence: &str, split_re: &MatchingSet) -> Vec<(Option<u32>, Offsets)> { if sentence.is_empty() { return vec![(None, (0, 0))]; } let mut start_offset = 0; let mut splits = vec![]; for mat in split_re.0.find_iter(sentence) { let mut start = mat.start(); let mut stop = mat.end(); let aho_id = mat.pattern(); let id = split_re.1[aho_id]; let added_token = &self.added_tokens_map_r.get(&id).unwrap(); if added_token.single_word { let start_space = start == 0 || !ends_with_word(&sentence[..start]); let stop_space = stop == sentence.len() || !starts_with_word(&sentence[stop..]); if !stop_space || !start_space { // Discard not single word continue; } } if added_token.lstrip { // This will be strictly inferior to start and in correct sentence offset let newstart = space_leftmost_at_end(&sentence[..start]); // The previous match could have already matched those spaces // Ignore them if it's already matched start = std::cmp::max(newstart, start_offset); } if added_token.rstrip { // This will starting a the stop+1 character, so we need // to add the previous stop value stop += space_rightmost_at_start(&sentence[stop..]) } if start_offset < start { splits.push((None, (start_offset, start))); } splits.push((Some(id), (start, stop))); start_offset = stop; } let total_byte_len = sentence.len(); if start_offset != total_byte_len { splits.push((None, (start_offset, total_byte_len))); } splits } /// Split the input sentence to extract anything we found from the `MatchingSet`, as well as /// the list of corresponding IDs /// The list of IDs have the exact same number of elements than the Iterator. 
fn split_with_indices( &self, sentence: NormalizedString, split_re: &MatchingSet, ) -> Vec<(NormalizedString, Option<Vec<Token>>)> { self.find_matches(sentence.get(), split_re) .into_iter() .map(|(id, byte_offsets)| { let slice = sentence .slice(Range::Normalized(byte_offsets.0..byte_offsets.1)) .expect("AddedVocabulary bad split"); if let Some(id) = id { let value = slice.get().to_owned(); let len = value.len(); (slice, Some(vec![Token::new(id, value, (0, len))])) } else { (slice, None) } }) .collect() } /// Extract the additional vocabulary from the given sentence, normalizing it along the way. /// /// Some tokens should match against their normalized representation, as well as the /// non-normalized one. For example, when we expect to extract the token `yesterday` in the /// input sentence `I read a book Yesterday`, if the normalizer is supposed to lowercase /// everything, we expect a match. pub fn extract_and_normalize<N: Normalizer>( &self, normalizer: Option<&N>, sequence: &str, ) -> PreTokenizedString { let mut pretokenized: PreTokenizedString = sequence.into(); // 1. We extract all the non-normalized tokens from the non-normalized string pretokenized .split(|_, sequence| Ok(self.split_with_indices(sequence, &self.split_trie))) .expect("AddedVocabulary bad split"); // 2. Then extract the normalized tokens from the normalized pieces of the string pretokenized .split(|_, mut sequence| { normalizer.map(|n| n.normalize(&mut sequence)); Ok(self.split_with_indices(sequence, &self.split_normalized_trie)) }) .expect("AddedVocabulary bad split"); pretokenized } } #[derive(Debug, Serialize, Deserialize)] pub(super) struct AddedTokenWithId { /// The id assigned to this token pub id: u32, #[serde(flatten)] /// The target AddedToken pub token: AddedToken, } impl Serialize for AddedVocabulary { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { let mut added_tokens = self .added_tokens_map_r .iter() .map(|(id, token)| AddedTokenWithId { id: *id, token: token.clone(), }) .collect::<Vec<_>>(); // We need to have these added tokens ordered by ascending ID added_tokens.sort_unstable_by_key(|o| o.id); let mut vocabulary = serializer.serialize_seq(Some(added_tokens.len()))?; for token in added_tokens { vocabulary.serialize_element(&token)?; } vocabulary.end() } } #[cfg(test)] mod tests { use super::*; use crate::normalizers::utils::Lowercase; use crate::normalizers::NormalizerWrapper; use crate::{OffsetReferential, OffsetType, Result, Token, Trainer}; use std::path::{Path, PathBuf}; #[derive(Serialize, Deserialize)] struct ModelMock { vocab: HashMap<String, u32>, vocab_r: HashMap<u32, String>, } impl ModelMock { pub fn new<I>(iter: I) -> Self where I: IntoIterator<Item = &'static (&'static str, u32)>, { let vocab: HashMap<String, u32> = iter .into_iter() .map(|&(tok, id)| (tok.to_string(), id)) .collect(); Self { vocab_r: vocab .iter() .map(|(tok, id)| (*id, tok.to_owned())) .collect(), vocab, } } } fn simplify_output(result: &'_ PreTokenizedString) -> Vec<(&'_ str, Option<Vec<u32>>)> { result .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, _, tokens)| { ( s, tokens .as_ref() .map(|t| t.iter().map(|t| t.id).collect::<Vec<_>>()), ) }) .collect::<Vec<_>>() } struct TrainerMock; impl Trainer for TrainerMock { type Model = ModelMock; fn should_show_progress(&self) -> bool { true } fn train(&self, _model: &mut ModelMock) -> Result<Vec<AddedToken>> { unimplemented!() } fn feed<I, S, F>(&mut self, _iterator: I, _process: F) -> Result<()> 
where I: Iterator<Item = S> + Send, S: AsRef<str> + Send, F: Fn(&str) -> Result<Vec<String>> + Sync, { unimplemented!() } } impl Model for ModelMock { type Trainer = TrainerMock; fn tokenize(&self, _sequence: &str) -> Result<Vec<Token>> { unimplemented!() } fn token_to_id(&self, token: &str) -> Option<u32> { self.vocab.get(token).copied() } fn id_to_token(&self, id: u32) -> Option<String> { self.vocab_r.get(&id).cloned() } fn get_vocab(&self) -> HashMap<String, u32> { self.vocab.clone() } fn get_vocab_size(&self) -> usize { self.vocab.len() } fn save(&self, _folder: &Path, _name: Option<&str>) -> Result<Vec<PathBuf>> { unimplemented!() } fn get_trainer(&self) -> Self::Trainer { TrainerMock } } #[test] fn can_add_tokens() { let model = ModelMock::new(&[("test", 0), ("tost", 1)]); let mut vocab = AddedVocabulary::new(); let normalizer: Option<&NormalizerWrapper> = None; // Add tokens normally assert_eq!( vocab.add_tokens( &[AddedToken::from("added_token_1", false)], &model, normalizer ), 1 ); assert_eq!(vocab.len(), 1); // Does not add multiple time the same token assert_eq!( vocab.add_tokens( &[ AddedToken::from("added_token_2", false), AddedToken::from("added_token_2", false) ], &model, normalizer ), 1 ); assert_eq!(vocab.len(), 2); // Does not add tokens already covered by the model assert_eq!( vocab.add_tokens(&[AddedToken::from("test", false)], &model, normalizer), 0 ); assert_eq!(vocab.len(), 2); } #[test] fn can_add_special_tokens() { let model = ModelMock::new(&[("test", 0), ("tost", 1)]); let mut vocab = AddedVocabulary::new(); let normalizer: Option<&NormalizerWrapper> = None; // Add tokens normally assert_eq!( vocab.add_special_tokens( &[AddedToken::from("added_token_1", true)], &model, normalizer ), 1 ); assert_eq!(vocab.len(), 1); // Does not add multiple time the same token assert_eq!( vocab.add_special_tokens( &[ AddedToken::from("added_token_2", true), AddedToken::from("added_token_2", true) ], &model, normalizer ), 1 ); assert_eq!(vocab.len(), 2); // Can add tokens already covered by the model assert_eq!( vocab.add_special_tokens(&[AddedToken::from("test", true)], &model, normalizer), 0 ); assert_eq!(vocab.len(), 2); // Did not add a new token, since it exist in the original model assert!(vocab.is_special_token("test")); assert!(!vocab.added_tokens_map.contains_key("test")); } #[test] fn can_extract_added_tokens() { // Is able to extract both normal and special tokens let model = ModelMock::new(&[]); let mut vocab = AddedVocabulary::new(); let normalizer: Option<&NormalizerWrapper> = None; vocab.add_tokens( &[ AddedToken::from("my", false), AddedToken::from("name", false), ], &model, normalizer, ); vocab.add_special_tokens( &[ AddedToken::from("[CLS]", true), AddedToken::from("[SEP]", true), ], &model, normalizer, ); let result = vocab.extract_and_normalize(normalizer, "[CLS] My name is Anthony [SEP]"); assert_eq!( result .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, _, tokens)| ( s, tokens .as_ref() .map(|t| t.iter().map(|t| t.id).collect::<Vec<_>>()) )) .collect::<Vec<_>>(), vec![ ("[CLS]", Some(vec![2])), (" My ", None), ("name", Some(vec![1])), (" is Anthony ", None), ("[SEP]", Some(vec![3])) ] ); } #[test] fn options_use_cases() { // Is able to extract both normal and special tokens, with various options (lstrip, rstrip, // single_word, normalized) let model = ModelMock::new(&[]); let normalizer = Lowercase; let mut vocab = AddedVocabulary::new(); vocab.add_tokens( &[ AddedToken::from("my", false).lstrip(true).rstrip(true), 
AddedToken::from("name", false), AddedToken::from("ony", false).single_word(true), ], &model, Some(&normalizer), ); vocab.add_special_tokens( &[ AddedToken::from("[CLS]", true), AddedToken::from("[SEP]", true), ], &model, Some(&normalizer), ); let result = vocab.extract_and_normalize(Some(&normalizer), "[CLS] My name is Anthony [SEP]"); assert_eq!( simplify_output(&result), vec![ ("[CLS]", Some(vec![3])), // This one includes both spaces because of the lstrip & rstrip // And it matches because normalized == true (" my ", Some(vec![0])), ("name", Some(vec![1])), // `ony` is not extracted here thanks to single_word (" is anthony ", None), ("[SEP]", Some(vec![4])), ] ); } #[test] fn empty_matches() { let vocab = AddedVocabulary::new(); let matches = vocab.find_matches("", &vocab.split_trie); assert_eq!(matches, vec![(None, (0, 0))]); } #[test] fn test_single_word_is_correct() { // Is able to extract both normal and special tokens, with various options (lstrip, rstrip, // single_word, normalized) let model = ModelMock::new(&[]); let mut vocab = AddedVocabulary::new(); let normalizer = Lowercase; vocab.add_tokens( &[AddedToken::from("<mask>", false).single_word(true)], &model, Some(&normalizer), ); // Left, in the middle, non single world left, non single word right, end of sentence valid let result = vocab.extract_and_normalize( Some(&normalizer), "<mask> My name <mask> A<mask> <mask>ony <mask>", ); assert_eq!( simplify_output(&result), vec![ ("<mask>", Some(vec![0])), (" my name ", None), ("<mask>", Some(vec![0])), (" a<mask> <mask>ony ", None), ("<mask>", Some(vec![0])) ] ); } #[test] fn test_single_word_is_unicode_correct() { let model = ModelMock::new(&[]); let mut vocab = AddedVocabulary::new(); let normalizer = Lowercase; vocab.add_tokens( &[AddedToken::from("<mask>", false).single_word(true)], &model, Some(&normalizer), ); let result = vocab.extract_and_normalize(Some(&normalizer), "<mask>, <mask>- ◌̰<mask>"); assert_eq!( simplify_output(&result), vec![ // Punctuation is not word ("<mask>", Some(vec![0])), (", ", None), // dash is not word ("<mask>", Some(vec![0])), // This is unicode combining mark character and is word: https://en.wikipedia.org/wiki/Combining_Diacritical_Marks ("- ◌̰<mask>", None), ] ); } #[test] fn test_lstrip_unicode_space() { let model = ModelMock::new(&[]); let mut vocab = AddedVocabulary::new(); let normalizer = Lowercase; vocab.add_tokens( &[AddedToken::from("<mask>", false) .lstrip(true) .rstrip(true) .single_word(true)], &model, Some(&normalizer), ); let result = vocab .extract_and_normalize(Some(&normalizer), "Hi <mask> there\t<mask>\t<mask>\u{2000}"); assert_eq!( simplify_output(&result), vec![ ("hi", None), // Regular space (" <mask> ", Some(vec![0])), ("there", None), // \t is a spacing character ("\t<mask>\t", Some(vec![0])), // Non overlapping // \u{2000} is mongolian vowel separator: https://jkorpela.fi/chars/spaces.html ("<mask>\u{2000}", Some(vec![0])), ] ); } }
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/utils/parallelism.rs
//! //! This module defines helpers to allow optional Rayon usage. //! use rayon::iter::IterBridge; use rayon::prelude::*; use rayon_cond::CondIterator; // Re-export rayon current_num_threads pub use rayon::current_num_threads; pub const ENV_VARIABLE: &str = "TOKENIZERS_PARALLELISM"; // Reading/Writing this variable should always happen on the main thread static mut USED_PARALLELISM: bool = false; /// Check if the TOKENIZERS_PARALLELISM env variable has been explicitly set pub fn is_parallelism_configured() -> bool { std::env::var(ENV_VARIABLE).is_ok() } /// Check if at some point we used a parallel iterator pub fn has_parallelism_been_used() -> bool { unsafe { USED_PARALLELISM } } /// Get the currently set value for `TOKENIZERS_PARALLELISM` env variable pub fn get_parallelism() -> bool { match std::env::var(ENV_VARIABLE) { Ok(mut v) => { v.make_ascii_lowercase(); !matches!(v.as_ref(), "" | "off" | "false" | "f" | "no" | "n" | "0") } Err(_) => true, // If we couldn't get the variable, we use the default } } /// Set the value for `TOKENIZERS_PARALLELISM` for the current process pub fn set_parallelism(val: bool) { std::env::set_var(ENV_VARIABLE, if val { "true" } else { "false" }) } /// Allows to convert into an iterator that can be executed either parallelly or serially. /// /// The choice is made according to the currently set `TOKENIZERS_PARALLELISM` environment variable. /// This variable can have one of the following values /// - False => "" (empty value), "false", "f", "off", "no", "n", "0" /// - True => Any other value /// pub trait MaybeParallelIterator<P, S> where P: ParallelIterator, S: Iterator<Item = P::Item>, { /// Convert ourself in a CondIterator, that will be executed either in parallel or serially, /// based solely on the `TOKENIZERS_PARALLELISM` environment variable fn into_maybe_par_iter(self) -> CondIterator<P, S>; /// Convert ourself in a CondIterator, that will be executed either in parallel or serially, /// based on both the `TOKENIZERS_PARALLELISM` environment variable and the provided bool. /// Both must be true to run with parallelism activated. 
fn into_maybe_par_iter_cond(self, cond: bool) -> CondIterator<P, S>; } impl<P, S, I> MaybeParallelIterator<P, S> for I where I: IntoParallelIterator<Iter = P, Item = P::Item> + IntoIterator<IntoIter = S, Item = S::Item>, P: ParallelIterator, S: Iterator<Item = P::Item>, { fn into_maybe_par_iter(self) -> CondIterator<P, S> { let parallelism = get_parallelism(); if parallelism { unsafe { USED_PARALLELISM = true }; } CondIterator::new(self, parallelism) } fn into_maybe_par_iter_cond(self, cond: bool) -> CondIterator<P, S> { if cond { self.into_maybe_par_iter() } else { CondIterator::from_serial(self) } } } /// Shared reference version of MaybeParallelIterator, works the same but returns an iterator /// over references, does not consume self pub trait MaybeParallelRefIterator<'data, P, S> where P: ParallelIterator, S: Iterator<Item = P::Item>, P::Item: 'data, { fn maybe_par_iter(&'data self) -> CondIterator<P, S>; fn maybe_par_iter_cond(&'data self, cond: bool) -> CondIterator<P, S>; } impl<'data, P, S, I: 'data + ?Sized> MaybeParallelRefIterator<'data, P, S> for I where &'data I: MaybeParallelIterator<P, S>, P: ParallelIterator, S: Iterator<Item = P::Item>, P::Item: 'data, { fn maybe_par_iter(&'data self) -> CondIterator<P, S> { self.into_maybe_par_iter() } fn maybe_par_iter_cond(&'data self, cond: bool) -> CondIterator<P, S> { self.into_maybe_par_iter_cond(cond) } } /// Exclusive reference version of MaybeParallelIterator, works the same but returns an iterator /// over mutable references, does not consume self pub trait MaybeParallelRefMutIterator<'data, P, S> where P: ParallelIterator, S: Iterator<Item = P::Item>, P::Item: 'data, { fn maybe_par_iter_mut(&'data mut self) -> CondIterator<P, S>; fn maybe_par_iter_mut_cond(&'data mut self, cond: bool) -> CondIterator<P, S>; } impl<'data, P, S, I: 'data + ?Sized> MaybeParallelRefMutIterator<'data, P, S> for I where &'data mut I: MaybeParallelIterator<P, S>, P: ParallelIterator, S: Iterator<Item = P::Item>, P::Item: 'data, { fn maybe_par_iter_mut(&'data mut self) -> CondIterator<P, S> { self.into_maybe_par_iter() } fn maybe_par_iter_mut_cond(&'data mut self, cond: bool) -> CondIterator<P, S> { self.into_maybe_par_iter_cond(cond) } } /// Converts any serial iterator into a CondIterator, that can either run parallelly or serially. pub trait MaybeParallelBridge<T, S> where S: Iterator<Item = T> + Send, T: Send, { fn maybe_par_bridge(self) -> CondIterator<IterBridge<S>, S>; fn maybe_par_bridge_cond(self, cond: bool) -> CondIterator<IterBridge<S>, S>; } impl<T, S> MaybeParallelBridge<T, S> for S where S: Iterator<Item = T> + Send, T: Send, { fn maybe_par_bridge(self) -> CondIterator<IterBridge<S>, S> { let iter = CondIterator::from_serial(self); if get_parallelism() { unsafe { USED_PARALLELISM = true }; CondIterator::from_parallel(iter.into_parallel().right().unwrap()) } else { iter } } fn maybe_par_bridge_cond(self, cond: bool) -> CondIterator<IterBridge<S>, S> { if cond { self.maybe_par_bridge() } else { CondIterator::from_serial(self) } } } /// Allows to convert into `chunks` that can be executed either parallelly or serially. 
pub trait MaybeParallelSlice<'data, T> where T: Sync, { /// Create a CondIterator, that will be executed either in parallel or serially, /// based solely on the `TOKENIZERS_PARALLELISM` environment variable fn maybe_par_chunks( &'_ self, chunk_size: usize, ) -> CondIterator<rayon::slice::Chunks<'_, T>, std::slice::Chunks<'_, T>>; /// Create a CondIterator, that will be executed either in parallel or serially, /// based on both the `TOKENIZERS_PARALLELISM` environment variable and the provided bool. /// Both must be true to run with parallelism activated. fn maybe_par_chunks_cond( &'_ self, cond: bool, chunk_size: usize, ) -> CondIterator<rayon::slice::Chunks<'_, T>, std::slice::Chunks<'_, T>>; } impl<T> MaybeParallelSlice<'_, T> for [T] where T: Sync, { fn maybe_par_chunks( &'_ self, chunk_size: usize, ) -> CondIterator<rayon::slice::Chunks<'_, T>, std::slice::Chunks<'_, T>> { let parallelism = get_parallelism(); if parallelism { CondIterator::from_parallel(self.par_chunks(chunk_size)) } else { CondIterator::from_serial(self.chunks(chunk_size)) } } fn maybe_par_chunks_cond( &'_ self, cond: bool, chunk_size: usize, ) -> CondIterator<rayon::slice::Chunks<'_, T>, std::slice::Chunks<'_, T>> { if cond { self.maybe_par_chunks(chunk_size) } else { CondIterator::from_serial(self.chunks(chunk_size)) } } } #[cfg(test)] mod tests { use super::*; #[test] fn test_maybe_parallel_iterator() { let mut v = vec![1u32, 2, 3, 4, 5, 6]; assert_eq!(v.maybe_par_iter().sum::<u32>(), 21); assert_eq!( v.maybe_par_iter_mut() .map(|v| { *v *= 2; *v }) .sum::<u32>(), 42 ); assert_eq!(v.maybe_par_iter().sum::<u32>(), 42); assert_eq!(v.into_maybe_par_iter().sum::<u32>(), 42); } #[test] fn test_maybe_parallel_slice() { let v = vec![1, 2, 3, 4, 5]; let chunks: Vec<_> = v.maybe_par_chunks(2).collect(); assert_eq!(chunks, vec![&[1, 2][..], &[3, 4], &[5]]); } }
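A minimal usage sketch, assuming it sits inside this module next to the existing tests: the same iterator pipeline runs serially or through Rayon depending on `TOKENIZERS_PARALLELISM`, which `set_parallelism` toggles for the current process.

#[cfg(test)]
mod maybe_parallel_usage_sketch {
    use super::*;

    #[test]
    fn same_result_with_and_without_rayon() {
        // NB: mutating the env variable can interfere with other tests running in parallel;
        // this is only an illustration of the dispatch, not a robust test setup.
        let data: Vec<u32> = (1..=100).collect();

        // Force the serial path (TOKENIZERS_PARALLELISM=false)...
        set_parallelism(false);
        let serial = data.maybe_par_iter().sum::<u32>();

        // ...then the Rayon-backed path (TOKENIZERS_PARALLELISM=true).
        set_parallelism(true);
        let parallel = data.maybe_par_iter().sum::<u32>();

        assert_eq!(serial, parallel);
        assert_eq!(serial, 5050);
    }
}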
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/utils/mod.rs
pub(crate) mod cache; #[cfg(feature = "http")] pub(crate) mod from_pretrained; #[cfg(feature = "unstable_wasm")] mod fancy; #[cfg(feature = "unstable_wasm")] pub use fancy::SysRegex; #[cfg(not(feature = "unstable_wasm"))] mod onig; #[cfg(not(feature = "unstable_wasm"))] pub use crate::utils::onig::SysRegex; pub mod iter; pub mod padding; pub mod parallelism; pub(crate) mod progress; pub mod truncation; use serde::{Serialize, Serializer}; use std::collections::{BTreeMap, HashMap}; pub(crate) fn ordered_map<S, K, V>( value: &HashMap<K, V>, serializer: S, ) -> std::result::Result<S::Ok, S::Error> where S: Serializer, K: Serialize + std::cmp::Ord, V: Serialize, { let ordered: BTreeMap<_, _> = value.iter().collect(); ordered.serialize(serializer) } macro_rules! impl_enum_from ( ($from_ty:ty, $enum:ty, $variant:ident) => { impl From<$from_ty> for $enum { fn from(from: $from_ty) -> Self { <$enum>::$variant(from) } } } ); /// Implement `serde::{Serialize, Serializer}` with `#[serde(tag = "type")]` attribute for a given struct. /// Panic when a json string being deserilized misses field `type`. /// /// # Examples /// /// ``` /// # #[macro_use] extern crate tokenizers; /// use serde::{Serialize, Deserialize}; /// /// fn main() { /// impl_serde_type!{ /// #[derive(Debug)] /// struct Point { /// x: i32, /// #[serde(default = "default_y")] /// y: i32, /// } /// } /// fn default_y() -> i32 { /// 5 /// } /// /// let point = Point { x: 1, y: 2 }; /// let serialized_s = r#"{"type":"Point","x":1,"y":2}"#; /// assert_eq!(serde_json::to_string(&point).unwrap(), serialized_s); /// } /// ``` /// /// ```should_panic /// # #[macro_use] extern crate tokenizers; /// use serde::{Serialize, Deserialize}; /// /// fn main() { /// impl_serde_type!{ /// #[derive(Debug)] /// struct Point1D { /// x: i32, /// } /// } /// /// let serialized_s = r#"{"x":1}"#; /// let deserialized: Point1D = serde_json::from_str(serialized_s).unwrap(); /// } /// ``` /// /// # Examples (unit structs) /// /// ``` /// # #[macro_use] extern crate tokenizers; /// use serde::{Serialize, Deserialize}; /// /// fn main() { /// impl_serde_type!{ /// struct Unit; /// } /// /// let unit = Unit; /// let serialized_s = r#"{"type":"Unit"}"#; /// assert_eq!(serde_json::to_string(&unit).unwrap(), serialized_s); /// } /// ``` /// /// ```should_panic /// # #[macro_use] extern crate tokenizers; /// use serde::{Serialize, Deserialize}; /// /// fn main() { /// impl_serde_type!{ /// struct Unit; /// } /// /// let serialized_s = r#"{"some_field":1}"#; /// let deserialized: Unit = serde_json::from_str(serialized_s).unwrap(); /// } /// ``` #[macro_export] macro_rules! 
impl_serde_type{ ( $(#[$meta:meta])* $vis:vis struct $struct_name:ident { $( $(#[$field_meta:meta])* $field_vis:vis $field_name:ident : $field_type:ty ),*$(,)+ } ) => { paste::paste!{ $(#[$meta])* #[derive(Serialize, Deserialize)] #[serde(tag = "type", from = $struct_name "Deserializer")] $vis struct $struct_name{ $( $(#[$field_meta])* $field_vis $field_name : $field_type, )* } #[doc(hidden)] $(#[$meta])* #[derive(Deserialize)] #[serde(tag = "type", remote = $struct_name "")] struct [<$struct_name Def>]{ $( $(#[$field_meta])* $field_vis $field_name : $field_type, )* } #[doc(hidden)] #[derive(Deserialize)] enum [<$struct_name Type>] { $struct_name, } #[doc(hidden)] #[derive(Deserialize)] struct [<$struct_name Deserializer>] { #[allow(dead_code)] r#type: [<$struct_name Type>], #[serde(flatten, with = $struct_name "Def")] r#struct: $struct_name, } #[doc(hidden)] impl std::convert::From<[<$struct_name Deserializer>]> for $struct_name { fn from(v: [<$struct_name Deserializer>]) -> Self { v.r#struct } } } }; ( $(#[$meta:meta])* $vis:vis struct $struct_name:ident; ) => { paste::paste!{ $(#[$meta])* $vis struct $struct_name; impl serde::Serialize for $struct_name { fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> where S: serde::ser::Serializer { let helper = [<$struct_name Helper>]{r#type: [<$struct_name Type>]::$struct_name}; helper.serialize(serializer) } } impl<'de> serde::Deserialize<'de> for $struct_name { fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error> where D: serde::Deserializer<'de>, { let _helper = [<$struct_name Helper>]::deserialize(deserializer)?; Ok($struct_name) } } #[derive(serde::Serialize, serde::Deserialize)] enum [<$struct_name Type>] { $struct_name, } #[derive(serde::Serialize, serde::Deserialize)] struct [<$struct_name Helper>] { #[allow(dead_code)] r#type: [<$struct_name Type>], } } } } // Re-export macro_rules_attribute pub use macro_rules_attribute::macro_rules_attribute;
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/utils/truncation.rs
use crate::tokenizer::{Encoding, Result}; use serde::{Deserialize, Serialize}; use std::cmp; use std::mem; #[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize, Eq, Default)] pub enum TruncationDirection { Left, #[default] Right, } impl std::convert::AsRef<str> for TruncationDirection { fn as_ref(&self) -> &str { match self { TruncationDirection::Left => "left", TruncationDirection::Right => "right", } } } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct TruncationParams { #[serde(default)] pub direction: TruncationDirection, pub max_length: usize, pub strategy: TruncationStrategy, pub stride: usize, } impl Default for TruncationParams { fn default() -> Self { Self { max_length: 512, strategy: TruncationStrategy::default(), stride: 0, direction: TruncationDirection::default(), } } } #[derive(thiserror::Error, Debug)] pub enum TruncationError { /// We are supposed to truncate the pair sequence, but it has not been provided. #[error("Truncation error: Second sequence not provided")] SecondSequenceNotProvided, /// We cannot truncate the target sequence enough to respect the provided max length. #[error("Truncation error: Sequence to truncate too short to respect the provided max_length")] SequenceTooShort, } #[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize, Eq)] pub enum TruncationStrategy { LongestFirst, OnlyFirst, OnlySecond, } impl Default for TruncationStrategy { fn default() -> Self { Self::LongestFirst } } impl std::convert::AsRef<str> for TruncationStrategy { fn as_ref(&self) -> &str { match self { Self::LongestFirst => "longest_first", Self::OnlyFirst => "only_first", Self::OnlySecond => "only_second", } } } pub fn truncate_encodings( mut encoding: Encoding, mut pair_encoding: Option<Encoding>, params: &TruncationParams, ) -> Result<(Encoding, Option<Encoding>)> { if params.max_length == 0 { encoding.truncate(0, params.stride, params.direction); if let Some(other_encoding) = pair_encoding.as_mut() { other_encoding.truncate(0, params.stride, params.direction); } return Ok((encoding, pair_encoding)); } let total_length = encoding.get_ids().len() + pair_encoding .as_ref() .map(|e| e.get_ids().len()) .unwrap_or(0); let to_remove = if total_length > params.max_length { total_length - params.max_length } else { return Ok((encoding, pair_encoding)); }; match params.strategy { TruncationStrategy::LongestFirst => { if let Some(other_encoding) = pair_encoding.as_mut() { // Assuming n1 <= n2, there are 3 cases // Case 1: // No truncation needs to be performed. // This scenario is handled before the match. // Case 2: // Only the longer input needs to be truncated. // n1 = n1 // n2 = max_length - n1 // Case 3: // Both inputs must be truncated. 
// n1 = max_length / 2 // n2 = n1 + max_length % 2 let mut n1 = encoding.get_ids().len(); let mut n2 = other_encoding.get_ids().len(); let mut swap = false; // Ensure n1 is the length of the shortest input if n1 > n2 { swap = true; mem::swap(&mut n1, &mut n2); } if n1 > params.max_length { // This needs to be a special case // to avoid max_length - n1 < 0 // since n1 and n2 are unsigned n2 = n1; } else { n2 = cmp::max(n1, params.max_length - n1); } if n1 + n2 > params.max_length { n1 = params.max_length / 2; n2 = n1 + params.max_length % 2; } // Swap lengths if we swapped previosuly if swap { mem::swap(&mut n1, &mut n2); } encoding.truncate(n1, params.stride, params.direction); other_encoding.truncate(n2, params.stride, params.direction); } else { encoding.truncate(total_length - to_remove, params.stride, params.direction); } } TruncationStrategy::OnlyFirst | TruncationStrategy::OnlySecond => { let target = if params.strategy == TruncationStrategy::OnlyFirst { Ok(&mut encoding) } else if let Some(encoding) = pair_encoding.as_mut() { Ok(encoding) } else { Err(Box::new(TruncationError::SecondSequenceNotProvided)) }?; let target_len = target.get_ids().len(); if target_len > to_remove { target.truncate(target_len - to_remove, params.stride, params.direction); } else { return Err(Box::new(TruncationError::SequenceTooShort)); } } } Ok((encoding, pair_encoding)) } #[cfg(test)] mod tests { use super::*; use crate::tokenizer::Encoding; use std::collections::HashMap; fn get_empty() -> Encoding { Encoding::new( vec![], vec![], vec![], vec![], vec![], vec![], vec![], vec![], HashMap::new(), ) } fn get_short() -> Encoding { Encoding::new( vec![1, 2], vec![0, 0], vec![String::from("a"), String::from("b")], vec![Some(0), Some(1)], vec![(0, 1), (1, 2)], vec![0, 0], vec![1, 1], vec![], HashMap::new(), ) } fn get_medium() -> Encoding { Encoding::new( vec![3, 4, 5, 6], vec![0, 0, 0, 0], vec![ String::from("d"), String::from("e"), String::from("f"), String::from("g"), ], vec![Some(0), Some(1), Some(2), Some(3)], vec![(0, 1), (1, 2), (2, 3), (3, 4)], vec![0, 0, 0, 0], vec![1, 1, 1, 1], vec![], HashMap::new(), ) } fn get_long() -> Encoding { Encoding::new( vec![7, 8, 9, 10, 11, 12, 13, 14], vec![0, 0, 0, 0, 0, 0, 0, 0], vec![ String::from("h"), String::from("i"), String::from("j"), String::from("k"), String::from("l"), String::from("m"), String::from("n"), String::from("o"), ], vec![ Some(0), Some(1), Some(2), Some(3), Some(4), Some(5), Some(6), Some(7), ], vec![ (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (6, 8), ], vec![0, 0, 0, 0, 0, 0, 0, 0], vec![1, 1, 1, 1, 1, 1, 1, 1], vec![], HashMap::new(), ) } fn truncate_and_assert( encoding1: Encoding, encoding2: Encoding, params: &TruncationParams, n1: usize, n2: usize, ) { match truncate_encodings(encoding1, Some(encoding2), params) { Ok((e1, Some(e2))) => { assert!(e1.get_ids().len() == n1); assert!(e2.get_ids().len() == n2); } _ => panic!(), }; } #[test] fn truncate_encodings_longest_first() { let params = TruncationParams { max_length: 7, strategy: TruncationStrategy::LongestFirst, stride: 0, direction: TruncationDirection::Right, }; truncate_and_assert(get_empty(), get_empty(), &params, 0, 0); truncate_and_assert(get_empty(), get_short(), &params, 0, 2); truncate_and_assert(get_empty(), get_medium(), &params, 0, 4); truncate_and_assert(get_empty(), get_long(), &params, 0, 7); truncate_and_assert(get_short(), get_empty(), &params, 2, 0); truncate_and_assert(get_short(), get_short(), &params, 2, 2); truncate_and_assert(get_short(), get_medium(), 
&params, 2, 4); truncate_and_assert(get_short(), get_long(), &params, 2, 5); truncate_and_assert(get_medium(), get_empty(), &params, 4, 0); truncate_and_assert(get_medium(), get_short(), &params, 4, 2); truncate_and_assert(get_medium(), get_medium(), &params, 3, 4); truncate_and_assert(get_medium(), get_long(), &params, 3, 4); truncate_and_assert(get_long(), get_empty(), &params, 7, 0); truncate_and_assert(get_long(), get_short(), &params, 5, 2); truncate_and_assert(get_long(), get_medium(), &params, 4, 3); truncate_and_assert(get_long(), get_long(), &params, 3, 4); } #[test] fn truncate_encodings_empty() { let params = TruncationParams { max_length: 0, strategy: TruncationStrategy::LongestFirst, stride: 0, direction: TruncationDirection::Right, }; truncate_and_assert(get_empty(), get_short(), &params, 0, 0); truncate_and_assert(get_medium(), get_medium(), &params, 0, 0); truncate_and_assert(get_long(), get_long(), &params, 0, 0); } #[test] fn test_deserialize_defaults() { let old_truncation_params = r#"{"max_length":256,"strategy":"LongestFirst","stride":0}"#; let params: TruncationParams = serde_json::from_str(old_truncation_params).unwrap(); assert_eq!(params.direction, TruncationDirection::Right); } }
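The three `LongestFirst` cases described in the comments above boil down to a small piece of length arithmetic. The sketch below is a hypothetical helper that mirrors the branch in `truncate_encodings` on raw lengths (it is not part of the original file), so the expected outputs of the test cases above can be checked by hand.

#[cfg(test)]
mod longest_first_arithmetic_sketch {
    use std::{cmp, mem};

    /// Mirror of the `LongestFirst` branch in `truncate_encodings`, on raw lengths.
    fn longest_first_lengths(len_a: usize, len_b: usize, max_length: usize) -> (usize, usize) {
        if len_a + len_b <= max_length {
            return (len_a, len_b);
        }
        let mut n1 = len_a;
        let mut n2 = len_b;
        let mut swap = false;
        // Ensure n1 is the length of the shorter input
        if n1 > n2 {
            swap = true;
            mem::swap(&mut n1, &mut n2);
        }
        if n1 > max_length {
            n2 = n1;
        } else {
            n2 = cmp::max(n1, max_length - n1);
        }
        if n1 + n2 > max_length {
            n1 = max_length / 2;
            n2 = n1 + max_length % 2;
        }
        if swap {
            mem::swap(&mut n1, &mut n2);
        }
        (n1, n2)
    }

    #[test]
    fn matches_the_longest_first_test_cases() {
        assert_eq!(longest_first_lengths(2, 4, 7), (2, 4)); // Case 1: no truncation
        assert_eq!(longest_first_lengths(2, 8, 7), (2, 5)); // Case 2: only the longer one
        assert_eq!(longest_first_lengths(8, 2, 7), (5, 2)); // Case 2, inputs swapped
        assert_eq!(longest_first_lengths(4, 8, 7), (3, 4)); // Case 3: both truncated
    }
}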
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/utils/fancy.rs
use fancy_regex::Regex; use std::error::Error; #[derive(Debug)] pub struct SysRegex { regex: Regex, } impl SysRegex { pub fn find_iter<'r, 't>(&'r self, inside: &'t str) -> Matches<'r, 't> { Matches(self.regex.find_iter(inside)) } pub fn new(regex_str: &str) -> Result<Self, Box<dyn Error + Send + Sync + 'static>> { Ok(Self { regex: Regex::new(regex_str)?, }) } } pub struct Matches<'r, 't>(fancy_regex::Matches<'r, 't>); impl<'r, 't> Iterator for Matches<'r, 't> { type Item = (usize, usize); fn next(&mut self) -> Option<Self::Item> { match self.0.next() { Some(Ok(mat)) => Some((mat.start(), mat.end())), // stop if an error is encountered None | Some(Err(_)) => None, } } }
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/utils/padding.rs
use crate::parallelism::*; use crate::tokenizer::{Encoding, Result}; use serde::{Deserialize, Serialize}; /// The various possible padding directions. #[derive(Debug, Clone, Copy, Serialize, Deserialize)] pub enum PaddingDirection { Left, Right, } impl std::convert::AsRef<str> for PaddingDirection { fn as_ref(&self) -> &str { match self { PaddingDirection::Left => "left", PaddingDirection::Right => "right", } } } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct PaddingParams { pub strategy: PaddingStrategy, pub direction: PaddingDirection, pub pad_to_multiple_of: Option<usize>, pub pad_id: u32, pub pad_type_id: u32, pub pad_token: String, } impl Default for PaddingParams { fn default() -> Self { Self { strategy: PaddingStrategy::BatchLongest, direction: PaddingDirection::Right, pad_to_multiple_of: None, pad_id: 0, pad_type_id: 0, pad_token: String::from("[PAD]"), } } } #[derive(Debug, Clone, Serialize, Deserialize)] pub enum PaddingStrategy { BatchLongest, Fixed(usize), } pub fn pad_encodings(encodings: &mut [Encoding], params: &PaddingParams) -> Result<()> { if encodings.is_empty() { return Ok(()); } let mut pad_length = match params.strategy { PaddingStrategy::Fixed(size) => size, PaddingStrategy::BatchLongest => encodings .maybe_par_iter() .map(|e| e.get_ids().len()) .max() .unwrap(), }; if let Some(multiple) = params.pad_to_multiple_of { if multiple > 0 && pad_length % multiple > 0 { pad_length += multiple - pad_length % multiple; } } encodings.maybe_par_iter_mut().for_each(|encoding| { encoding.pad( pad_length, params.pad_id, params.pad_type_id, &params.pad_token, params.direction, ) }); Ok(()) } #[cfg(test)] mod tests { use super::*; use crate::tokenizer::Encoding; use std::collections::HashMap; #[test] fn pad_to_multiple() { fn get_encodings() -> [Encoding; 2] { [ Encoding::new( vec![0, 1, 2, 3, 4], vec![], vec![], vec![], vec![], vec![], vec![], vec![], HashMap::new(), ), Encoding::new( vec![0, 1, 2], vec![], vec![], vec![], vec![], vec![], vec![], vec![], HashMap::new(), ), ] } // Test fixed let mut encodings = get_encodings(); let mut params = PaddingParams { strategy: PaddingStrategy::Fixed(7), direction: PaddingDirection::Right, pad_to_multiple_of: Some(8), pad_id: 0, pad_type_id: 0, pad_token: String::from("[PAD]"), }; pad_encodings(&mut encodings, &params).unwrap(); assert!(encodings.iter().all(|e| e.get_ids().len() == 8)); // Test batch let mut encodings = get_encodings(); params.strategy = PaddingStrategy::BatchLongest; params.pad_to_multiple_of = Some(6); pad_encodings(&mut encodings, &params).unwrap(); assert!(encodings.iter().all(|e| e.get_ids().len() == 6)); // Do not crash with 0 params.pad_to_multiple_of = Some(0); pad_encodings(&mut encodings, &params).unwrap(); } }
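The `pad_to_multiple_of` handling above is just an upward rounding of the chosen `pad_length`. A small sketch of that arithmetic (a hypothetical helper, not part of the original file, kept in a test module for illustration):

#[cfg(test)]
mod pad_to_multiple_sketch {
    /// Round `pad_length` up to the next multiple of `pad_to_multiple_of`, as `pad_encodings` does.
    fn round_up(pad_length: usize, pad_to_multiple_of: Option<usize>) -> usize {
        match pad_to_multiple_of {
            Some(multiple) if multiple > 0 && pad_length % multiple > 0 => {
                pad_length + (multiple - pad_length % multiple)
            }
            _ => pad_length,
        }
    }

    #[test]
    fn mirrors_pad_encodings_rounding() {
        assert_eq!(round_up(7, Some(8)), 8); // Fixed(7) rounded up to 8
        assert_eq!(round_up(5, Some(6)), 6); // BatchLongest of 5 rounded up to 6
        assert_eq!(round_up(8, Some(4)), 8); // already aligned: unchanged
        assert_eq!(round_up(5, Some(0)), 5); // a zero multiple is ignored
        assert_eq!(round_up(5, None), 5);    // no multiple requested
    }
}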
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/utils/cache.rs
use std::borrow::Borrow; use std::collections::HashMap; use std::hash::Hash; use std::sync::RwLock; /// The default capacity for a `BPE`'s internal cache. pub static DEFAULT_CACHE_CAPACITY: usize = 10_000; /// Provides a simple multithread cache to speed up BPE tokenization that will try to read values /// concurrently but won't block if another thread is writing. /// The goal is clearly not the accuracy of the content, both get and set /// are not guaranteed to actually get or set. #[derive(Debug)] pub(crate) struct Cache<K, V> where K: Eq + Hash + Clone, V: Clone, { map: RwLock<HashMap<K, V>>, pub capacity: usize, } // We dont really care about Cache comparison, so let's make them always equal impl<K, V> PartialEq for Cache<K, V> where K: Eq + Hash + Clone, V: Clone, { fn eq(&self, _other: &Cache<K, V>) -> bool { true } } impl<K, V> Default for Cache<K, V> where K: Eq + Hash + Clone, V: Clone, { fn default() -> Self { Self::new(DEFAULT_CACHE_CAPACITY) } } impl<K, V> Cache<K, V> where K: Eq + Hash + Clone, V: Clone, { /// Create new `Cache` with the given capacity. pub(crate) fn new(capacity: usize) -> Self { let map = RwLock::new(HashMap::with_capacity(capacity)); Cache { map, capacity } } /// Create a fresh `Cache` with the same configuration. pub(crate) fn fresh(&self) -> Self { Self::new(self.capacity) } /// Clear the cache. pub(crate) fn clear(&self) { self.map.write().unwrap().clear(); } #[allow(dead_code)] pub(crate) fn get_values<'a, I, Q>(&self, keys_iter: I) -> Option<Vec<Option<V>>> where I: Iterator<Item = &'a Q>, K: Borrow<Q>, Q: Hash + Eq + ?Sized + 'a, { if let Ok(ref mut cache) = self.map.try_read() { Some(keys_iter.map(|k| cache.get(k).cloned()).collect()) } else { None } } pub(crate) fn get<Q>(&self, key: &Q) -> Option<V> where K: Borrow<Q>, Q: Hash + Eq + ?Sized, { if let Ok(ref mut cache) = self.map.try_read() { cache.get(key).cloned() } else { None } } pub(crate) fn set_values<I>(&self, entries: I) where I: IntoIterator<Item = (K, V)>, { // Before trying to acquire a write lock, we check if we are already at // capacity with a read handler. if let Ok(cache) = self.map.try_read() { if cache.len() >= self.capacity { // At capacity, so do nothing. return; } } else { // If we couldn't acquire a read handle then we probably won't be able to acquire // a write handle one quadrillionth of a second later. return; } // Not at capacity, so try acquiring a write handle. if let Ok(mut cache) = self.map.try_write() { let free = self.capacity - cache.len(); cache.extend(entries.into_iter().take(free)); } } pub(crate) fn set(&self, key: K, value: V) { self.set_values(std::iter::once((key, value))) } }
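A sketch of the best-effort contract in practice, written as an in-crate test since `Cache` is `pub(crate)`: writes are silently dropped once the capacity is reached, and callers must always be able to recompute a value that `get` fails to return.

#[cfg(test)]
mod cache_semantics_sketch {
    use super::Cache;

    #[test]
    fn writes_are_dropped_once_full() {
        let cache: Cache<String, u32> = Cache::new(2);
        cache.set("a".to_string(), 1);
        cache.set("b".to_string(), 2);
        // At capacity: this write is silently ignored rather than evicting anything.
        cache.set("c".to_string(), 3);

        assert_eq!(cache.get("a"), Some(1));
        assert_eq!(cache.get("b"), Some(2));
        assert_eq!(cache.get("c"), None);

        // `clear` is the only way (in this API) to make room again.
        cache.clear();
        cache.set("c".to_string(), 3);
        assert_eq!(cache.get("c"), Some(3));
    }
}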
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/utils/iter.rs
//! This comes from the Rust libcore and is duplicated here because it is not exported //! (cf <https://github.com/rust-lang/rust/blob/25091ed9b7739e12466fb2490baa1e8a2815121c/src/libcore/iter/adapters/mod.rs#L2664>) //! We are now using the version from <https://stackoverflow.com/questions/44544323/how-to-unzip-a-sequence-of-resulta-b-e-to-a-veca-vecb-and-stop-on-f> //! because the one from the libcore seems to cause overflowing stacks in some cases //! It also contains a lines_with_ending that copies std::io::BufRead but keeps line endings. use std::io::BufRead; pub struct ResultShunt<I, E> { iter: I, error: Option<E>, } impl<I, T, E> ResultShunt<I, E> where I: Iterator<Item = Result<T, E>>, { /// Process the given iterator as if it yielded a `T` instead of a /// `Result<T, _>`. Any errors will stop the inner iterator and /// the overall result will be an error. pub fn process<F, U>(iter: I, mut f: F) -> Result<U, E> where F: FnMut(&mut Self) -> U, { let mut shunt = ResultShunt::new(iter); let value = f(shunt.by_ref()); shunt.reconstruct(value) } fn new(iter: I) -> Self { ResultShunt { iter, error: None } } /// Consume the adapter and rebuild a `Result` value. This should /// *always* be called, otherwise any potential error would be /// lost. fn reconstruct<U>(self, val: U) -> Result<U, E> { match self.error { None => Ok(val), Some(e) => Err(e), } } } impl<I, T, E> Iterator for ResultShunt<I, E> where I: Iterator<Item = Result<T, E>>, { type Item = T; fn next(&mut self) -> Option<Self::Item> { match self.iter.next() { Some(Ok(v)) => Some(v), Some(Err(e)) => { self.error = Some(e); None } None => None, } } } /// Copied from std::io::BufRead but keep newline characters. #[derive(Debug)] pub struct Lines<B> { buf: B, } pub trait LinesWithEnding<B> { fn lines_with_ending(self) -> Lines<B>; } impl<B> LinesWithEnding<B> for B where B: BufRead, { fn lines_with_ending(self) -> Lines<B> { Lines::<B> { buf: self } } } impl<B: BufRead> Iterator for Lines<B> { type Item = std::io::Result<String>; fn next(&mut self) -> Option<Self::Item> { let mut buf = String::new(); match self.buf.read_line(&mut buf) { Ok(0) => None, Ok(_n) => { // if buf.ends_with('\n') { // buf.pop(); // if buf.ends_with('\r') { // buf.pop(); // } // } Some(Ok(buf)) } Err(e) => Some(Err(e)), } } }
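Two short usage sketches, assuming they sit next to this module inside the crate: `ResultShunt::process` stops at the first `Err` and surfaces it, while `lines_with_ending` keeps the line terminators that `BufRead::lines` would strip.

#[cfg(test)]
mod iter_usage_sketch {
    use super::{LinesWithEnding, ResultShunt};
    use std::io::Cursor;

    #[test]
    fn result_shunt_surfaces_the_first_error() {
        let items = vec![Ok(1u32), Ok(2), Err("boom"), Ok(4)];
        // The inner iterator is consumed up to the error; the sum is discarded.
        let summed: Result<u32, &str> =
            ResultShunt::process(items.into_iter(), |it| it.sum::<u32>());
        assert_eq!(summed, Err("boom"));
    }

    #[test]
    fn lines_with_ending_keeps_line_terminators() {
        let reader = Cursor::new("first\nsecond\r\nlast");
        let lines: Vec<String> = reader
            .lines_with_ending()
            .collect::<std::io::Result<_>>()
            .unwrap();
        assert_eq!(lines, vec!["first\n", "second\r\n", "last"]);
    }
}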
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/utils/onig.rs
use crate::tokenizer::pattern::Pattern; use crate::{Offsets, Result}; use onig::Regex; use std::error::Error; #[derive(Debug)] pub struct SysRegex { regex: Regex, } impl SysRegex { pub fn find_iter<'r, 't>(&'r self, inside: &'t str) -> onig::FindMatches<'r, 't> { self.regex.find_iter(inside) } pub fn new( regex_str: &str, ) -> std::result::Result<Self, Box<dyn Error + Send + Sync + 'static>> { Ok(Self { regex: Regex::new(regex_str)?, }) } } impl Pattern for &Regex { fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> { if inside.is_empty() { return Ok(vec![((0, 0), false)]); } let mut prev = 0; let mut splits = Vec::with_capacity(inside.len()); for (start, end) in self.find_iter(inside) { if prev != start { splits.push(((prev, start), false)); } splits.push(((start, end), true)); prev = end; } if prev != inside.len() { splits.push(((prev, inside.len()), false)) } Ok(splits) } }
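A usage sketch for the default (non-`unstable_wasm`) build, assuming it sits inside this module: `SysRegex::find_iter` yields the byte offsets of each match, while the `Pattern` impl for `&Regex` splits the whole string into `(offsets, is_match)` spans.

#[cfg(test)]
mod onig_pattern_sketch {
    use crate::tokenizer::pattern::Pattern;
    use onig::Regex;

    #[test]
    fn sys_regex_yields_match_offsets() {
        let re = super::SysRegex::new(r"\s+").unwrap();
        let offsets: Vec<(usize, usize)> = re.find_iter("the quick  fox").collect();
        assert_eq!(offsets, vec![(3, 4), (9, 11)]);
    }

    #[test]
    fn pattern_splits_into_matching_and_non_matching_spans() {
        let re = Regex::new(r"\s+").unwrap();
        let splits = (&re).find_matches("the quick  fox").unwrap();
        assert_eq!(
            splits,
            vec![
                ((0, 3), false),   // "the"
                ((3, 4), true),    // " "
                ((4, 9), false),   // "quick"
                ((9, 11), true),   // "  "
                ((11, 14), false), // "fox"
            ]
        );
    }
}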
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/utils/from_pretrained.rs
use crate::Result; use cached_path::CacheBuilder; use itertools::Itertools; use reqwest::{blocking::Client, header}; use std::borrow::Cow; use std::collections::HashMap; use std::path::PathBuf; /// Returns a directory to be used as cache. /// /// If the `TOKENIZERS_CACHE` environment variable is set, we just return it. It is the /// responsibility of the user to make sure this path is correct. /// /// Otherwise, we try to use the default cache directory as defined for each OS: /// - macOS: `/Users/{user}/Library/Caches/huggingface/tokenizers` /// - linux: `/home/{user}/.cache/huggingface/tokenizers` /// - windows: `C:\Users\{user}\AppData\Local\huggingface\tokenizers` /// If the default cache directory cannot be found (if the user HOME folder is not defined), /// then we fall back on a temporary directory fn cache_dir() -> PathBuf { if let Ok(path) = std::env::var("TOKENIZERS_CACHE") { PathBuf::from(path) } else { let mut dir = dirs::cache_dir().unwrap_or_else(std::env::temp_dir); dir.push("huggingface"); dir.push("tokenizers"); dir } } /// Returns a directory to be used as cache, creating it if it doesn't exist /// /// Cf `cache_dir()` to understand how the cache dir is selected. fn ensure_cache_dir() -> std::io::Result<PathBuf> { let dir = cache_dir(); std::fs::create_dir_all(&dir)?; Ok(dir) } /// Sanitize a key or value to be used inside the user_agent /// The user_agent uses `/` and `;` to format the key-values, so we /// replace them by `-` fn sanitize_user_agent(item: &str) -> Cow<str> { let mut sanitized = Cow::Borrowed(item); if sanitized.contains('/') { sanitized = Cow::Owned(sanitized.replace('/', "-")); } if sanitized.contains(';') { sanitized = Cow::Owned(sanitized.replace(';', "-")); } sanitized } const VERSION: &str = env!("CARGO_PKG_VERSION"); // We allow unstable name collisions in this case because we don't care if it // starts using the new stable feature when it will be stable. This feature is // supposed to be a copy of the one we use anyway. 
// cf https://github.com/rust-lang/rust/issues/79524 #[allow(unstable_name_collisions)] fn user_agent(additional_info: HashMap<String, String>) -> String { let additional_str: String = additional_info .iter() .map(|(k, v)| format!("{}/{}", sanitize_user_agent(k), sanitize_user_agent(v))) .intersperse("; ".to_string()) .collect(); let user_agent = format!( "tokenizers/{}{}", VERSION, if !additional_str.is_empty() { format!("; {}", additional_str) } else { String::new() } ); user_agent } /// Defines the aditional parameters available for the `from_pretrained` function #[derive(Debug, Clone)] pub struct FromPretrainedParameters { pub revision: String, pub user_agent: HashMap<String, String>, pub auth_token: Option<String>, } impl Default for FromPretrainedParameters { fn default() -> Self { Self { revision: "main".into(), user_agent: HashMap::new(), auth_token: None, } } } /// Downloads and cache the identified tokenizer if it exists on /// the Hugging Face Hub, and returns a local path to the file pub fn from_pretrained<S: AsRef<str>>( identifier: S, params: Option<FromPretrainedParameters>, ) -> Result<PathBuf> { let identifier: &str = identifier.as_ref(); let valid_chars = ['-', '_', '.', '/']; let is_valid_char = |x: char| x.is_alphanumeric() || valid_chars.contains(&x); let valid = identifier.chars().all(is_valid_char); let valid_chars_stringified = valid_chars .iter() .fold(vec![], |mut buf, x| { buf.push(format!("'{}'", x)); buf }) .join(", "); // "'/', '-', '_', '.'" if !valid { return Err(format!( "Model \"{}\" contains invalid characters, expected only alphanumeric or {valid_chars_stringified}", identifier ) .into()); } let params = params.unwrap_or_default(); let cache_dir = ensure_cache_dir()?; let revision = &params.revision; let valid_revision = revision.chars().all(is_valid_char); if !valid_revision { return Err(format!( "Revision \"{}\" contains invalid characters, expected only alphanumeric or {valid_chars_stringified}", revision ) .into()); } // Build a custom HTTP Client using our user-agent and custom headers let mut headers = header::HeaderMap::new(); if let Some(ref token) = params.auth_token { headers.insert( "Authorization", header::HeaderValue::from_str(&format!("Bearer {}", token))?, ); } let client_builder = Client::builder() .user_agent(user_agent(params.user_agent)) .default_headers(headers); // Create a cache object let cache = CacheBuilder::with_client_builder(client_builder) .dir(cache_dir) .build()?; let url_to_download = format!( "https://huggingface.co/{}/resolve/{}/tokenizer.json", identifier, revision, ); match cache.cached_path(&url_to_download) { Err(_) => Err(format!( "Model \"{}\" on the Hub doesn't have a tokenizer", identifier ) .into()), Ok(path) => Ok(path), } }
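A hedged sketch of calling this helper from inside the crate (the module is `pub(crate)`, so this cannot be called from outside; the public entry point is normally the tokenizer's own `from_pretrained`). It needs the `http` feature and network access, and the model identifier below is only an example.

// Hypothetical in-crate sketch, not part of the original file.
#[cfg(feature = "http")]
#[allow(dead_code)]
fn fetch_tokenizer_json_sketch() -> Result<std::path::PathBuf> {
    // Resolves and caches
    // https://huggingface.co/{identifier}/resolve/{revision}/tokenizer.json
    // and returns the local path to the cached file.
    let params = FromPretrainedParameters {
        revision: "main".into(),
        ..Default::default()
    };
    from_pretrained("bert-base-uncased", Some(params))
}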
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/utils/progress.rs
#[cfg(feature = "progressbar")] pub(crate) use indicatif::{ProgressBar, ProgressStyle}; #[cfg(not(feature = "progressbar"))] mod progressbar { pub struct ProgressBar; impl ProgressBar { pub fn new(_length: u64) -> Self { Self {} } pub fn set_length(&self, _length: u64) {} pub fn set_draw_delta(&self, _draw_delta: u64) {} pub fn set_message(&self, _message: &str) {} pub fn finish(&self) {} pub fn reset(&self) {} pub fn inc(&self, _inc: u64) {} pub fn set_style(&self, _style: ProgressStyle) {} } pub struct ProgressStyle {} impl ProgressStyle { pub fn default_bar() -> Self { Self {} } pub fn template(self, _template: &str) -> Self { self } } } #[cfg(not(feature = "progressbar"))] pub(crate) use progressbar::{ProgressBar, ProgressStyle};
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/decoders/sequence.rs
use crate::decoders::DecoderWrapper; use crate::tokenizer::{Decoder, Result}; use crate::utils::macro_rules_attribute; use serde::{Deserialize, Serialize}; #[derive(Clone, Debug)] #[macro_rules_attribute(impl_serde_type!)] pub struct Sequence { decoders: Vec<DecoderWrapper>, } impl Sequence { pub fn new(decoders: Vec<DecoderWrapper>) -> Self { Self { decoders } } } impl Decoder for Sequence { fn decode_chain(&self, mut tokens: Vec<String>) -> Result<Vec<String>> { for decoder in &self.decoders { tokens = decoder.decode_chain(tokens)?; } Ok(tokens) } } #[cfg(test)] mod tests { use super::*; use crate::decoders::ctc::CTC; use crate::pre_tokenizers::metaspace::Metaspace; #[test] fn sequence_basic() { let decoders = vec![ DecoderWrapper::CTC(CTC::default()), DecoderWrapper::Metaspace(Metaspace::default()), ]; let decoder = Sequence::new(decoders); let tokens: Vec<String> = vec!["▁", "▁", "H", "H", "i", "i", "▁", "y", "o", "u"] .into_iter() .map(|s| s.to_string()) .collect(); let out_tokens = decoder.decode(tokens).unwrap(); assert_eq!(out_tokens, "Hi you"); } }
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/decoders/mod.rs
pub mod bpe; pub mod byte_fallback; pub mod ctc; pub mod fuse; pub mod sequence; pub mod strip; pub mod wordpiece; // Re-export these as decoders pub use super::pre_tokenizers::byte_level; pub use super::pre_tokenizers::metaspace; use serde::{Deserialize, Serialize}; use crate::decoders::bpe::BPEDecoder; use crate::decoders::byte_fallback::ByteFallback; use crate::decoders::ctc::CTC; use crate::decoders::fuse::Fuse; use crate::decoders::sequence::Sequence; use crate::decoders::strip::Strip; use crate::decoders::wordpiece::WordPiece; use crate::normalizers::replace::Replace; use crate::pre_tokenizers::byte_level::ByteLevel; use crate::pre_tokenizers::metaspace::Metaspace; use crate::{Decoder, Result}; #[derive(Serialize, Deserialize, Clone, Debug)] #[serde(untagged)] pub enum DecoderWrapper { BPE(BPEDecoder), ByteLevel(ByteLevel), WordPiece(WordPiece), Metaspace(Metaspace), CTC(CTC), Sequence(Sequence), Replace(Replace), Fuse(Fuse), Strip(Strip), ByteFallback(ByteFallback), } impl Decoder for DecoderWrapper { fn decode_chain(&self, tokens: Vec<String>) -> Result<Vec<String>> { match self { Self::BPE(bpe) => bpe.decode_chain(tokens), Self::ByteLevel(bl) => bl.decode_chain(tokens), Self::Metaspace(ms) => ms.decode_chain(tokens), Self::WordPiece(wp) => wp.decode_chain(tokens), Self::CTC(ctc) => ctc.decode_chain(tokens), Self::Sequence(seq) => seq.decode_chain(tokens), Self::Replace(seq) => seq.decode_chain(tokens), Self::ByteFallback(bf) => bf.decode_chain(tokens), Self::Strip(bf) => bf.decode_chain(tokens), Self::Fuse(bf) => bf.decode_chain(tokens), } } } impl_enum_from!(BPEDecoder, DecoderWrapper, BPE); impl_enum_from!(ByteLevel, DecoderWrapper, ByteLevel); impl_enum_from!(ByteFallback, DecoderWrapper, ByteFallback); impl_enum_from!(Fuse, DecoderWrapper, Fuse); impl_enum_from!(Strip, DecoderWrapper, Strip); impl_enum_from!(Metaspace, DecoderWrapper, Metaspace); impl_enum_from!(WordPiece, DecoderWrapper, WordPiece); impl_enum_from!(CTC, DecoderWrapper, CTC); impl_enum_from!(Sequence, DecoderWrapper, Sequence); impl_enum_from!(Replace, DecoderWrapper, Replace); #[cfg(test)] mod tests { use super::*; #[test] fn decoder_serialization() { let json = r#"{"type":"Sequence","decoders":[{"type":"ByteFallback"},{"type":"Metaspace","replacement":"▁","add_prefix_space":true}]}"#; let decoder: DecoderWrapper = serde_json::from_str(json).unwrap(); let serialized = serde_json::to_string(&decoder).unwrap(); assert_eq!(serialized, json); } #[test] fn decoder_serialization_other_no_arg() { let json = r#"{"type":"Sequence","decoders":[{"type":"Fuse"},{"type":"Metaspace","replacement":"▁","add_prefix_space":true}]}"#; let decoder: DecoderWrapper = serde_json::from_str(json).unwrap(); let serialized = serde_json::to_string(&decoder).unwrap(); assert_eq!(serialized, json); } #[test] fn decoder_serialization_no_decode() { let json = r#"{"type":"Sequence","decoders":[{},{"type":"Metaspace","replacement":"▁","add_prefix_space":true}]}"#; assert!(serde_json::from_str::<DecoderWrapper>(json).is_err()); } }
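A short sketch of how the `impl_enum_from!` conversions above are typically used, assuming it sits next to the existing tests: concrete decoders convert into `DecoderWrapper` with `.into()`, and a `Sequence` chains them.

#[cfg(test)]
mod wrapper_usage_sketch {
    use super::*;

    #[test]
    fn wrappers_compose_through_from_impls() {
        // ByteFallback first (turns `<0xNN>` tokens into bytes), then Fuse
        // (joins everything into a single string).
        let decoder: DecoderWrapper = Sequence::new(vec![
            ByteFallback::new().into(),
            Fuse::new().into(),
        ])
        .into();

        let decoded = decoder
            .decode_chain(vec!["<0x48>".into(), "<0x69>".into(), "!".into()])
            .unwrap();
        assert_eq!(decoded, vec!["Hi!"]);
    }
}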
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/decoders/wordpiece.rs
use crate::tokenizer::{Decoder, Result}; use serde::{Deserialize, Serialize}; #[derive(Deserialize, Clone, Debug, Serialize)] /// The WordPiece decoder takes care of decoding a list of wordpiece tokens /// back into a readable string. #[serde(tag = "type")] #[non_exhaustive] pub struct WordPiece { /// The prefix to be used for continuing subwords pub prefix: String, /// Whether to cleanup some tokenization artifacts (spaces before punctuation, ...) pub cleanup: bool, } impl WordPiece { pub fn new(prefix: String, cleanup: bool) -> Self { Self { prefix, cleanup } } } impl Default for WordPiece { fn default() -> Self { Self { prefix: "##".to_owned(), cleanup: true, } } } pub fn cleanup(dirty_input: &str) -> String { dirty_input .replace(" .", ".") .replace(" ?", "?") .replace(" !", "!") .replace(" ,", ",") .replace(" ' ", "'") .replace(" n't", "n't") .replace(" 'm", "'m") .replace(" do not", " don't") .replace(" 's", "'s") .replace(" 've", "'ve") .replace(" 're", "'re") } impl Decoder for WordPiece { fn decode_chain(&self, mut tokens: Vec<String>) -> Result<Vec<String>> { tokens .iter_mut() .enumerate() .map(|(i, token)| { if i != 0 { if token.starts_with(&self.prefix) { *token = token.replacen(&self.prefix, "", 1); } else { *token = format!(" {}", token); } } if self.cleanup { *token = cleanup(token); } Ok(token.to_string()) }) .collect::<Result<_>>() } } #[cfg(test)] mod tests { use super::*; #[test] fn wordpiece_decoder() { let decoder = WordPiece::new("##".to_string(), false); assert_eq!( decoder .decode(vec![ "##uelo".to_string(), "Ara".to_string(), "##új".to_string(), "##o".to_string(), "No".to_string(), "##guera".to_string() ]) .unwrap(), "##uelo Araújo Noguera" ); } }
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/decoders/byte_fallback.rs
use crate::tokenizer::{Decoder, Result}; use monostate::MustBe; use serde::{Deserialize, Serialize}; #[derive(Deserialize, Clone, Debug, Serialize, Default)] /// ByteFallback is a simple trick which converts tokens looking like `<0x61>` /// to pure bytes, and attempts to make them into a string. If the tokens /// cannot be decoded you will get � instead for each inconvertable byte token #[non_exhaustive] pub struct ByteFallback { #[serde(rename = "type")] type_: MustBe!("ByteFallback"), } impl ByteFallback { pub fn new() -> Self { Self { type_: MustBe!("ByteFallback"), } } } impl Decoder for ByteFallback { fn decode_chain(&self, tokens: Vec<String>) -> Result<Vec<String>> { let mut new_tokens: Vec<String> = vec![]; let mut previous_byte_tokens: Vec<u8> = vec![]; for token in tokens { let bytes = if token.len() == 6 && token.starts_with("<0x") && token.ends_with('>') { if let Ok(byte) = u8::from_str_radix(&token[3..5], 16) { Some(byte) } else { None } } else { None }; if let Some(bytes) = bytes { previous_byte_tokens.push(bytes); } else { if !previous_byte_tokens.is_empty() { if let Ok(string) = String::from_utf8(previous_byte_tokens.clone()) { new_tokens.push(string); } else { for _ in 0..previous_byte_tokens.len() { new_tokens.push("�".into()); } } previous_byte_tokens.clear(); } new_tokens.push(token); } } if !previous_byte_tokens.is_empty() { if let Ok(string) = String::from_utf8(previous_byte_tokens.clone()) { new_tokens.push(string); } else { for _ in 0..previous_byte_tokens.len() { new_tokens.push("�".into()); } } } Ok(new_tokens) } } #[cfg(test)] mod tests { use super::*; #[test] fn decode() { let decoder = ByteFallback::new(); let res = decoder .decode_chain(vec!["Hey".into(), "friend!".into()]) .unwrap(); assert_eq!(res, vec!["Hey", "friend!"]); let res = decoder.decode_chain(vec!["<0x61>".into()]).unwrap(); assert_eq!(res, vec!["a"]); let res = decoder.decode_chain(vec!["<0xE5>".into()]).unwrap(); assert_eq!(res, vec!["�"]); let res = decoder .decode_chain(vec!["<0xE5>".into(), "<0x8f>".into()]) .unwrap(); assert_eq!(res, vec!["�", "�"]); // 叫 let res = decoder .decode_chain(vec!["<0xE5>".into(), "<0x8f>".into(), "<0xab>".into()]) .unwrap(); assert_eq!(res, vec!["叫"]); let res = decoder .decode_chain(vec![ "<0xE5>".into(), "<0x8f>".into(), "<0xab>".into(), "a".into(), ]) .unwrap(); assert_eq!(res, vec!["叫", "a"]); let res = decoder .decode_chain(vec!["<0xE5>".into(), "<0x8f>".into(), "a".into()]) .unwrap(); assert_eq!(res, vec!["�", "�", "a"]); } }
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/decoders/bpe.rs
use crate::tokenizer::{Decoder, Result}; use serde::{Deserialize, Serialize}; #[derive(Deserialize, Clone, Debug, Serialize)] /// Allows decoding the original BPE by joining all the tokens and then replacing /// the suffix used to identify end-of-words with whitespace #[serde(tag = "type")] #[non_exhaustive] pub struct BPEDecoder { pub suffix: String, } impl BPEDecoder { pub fn new(suffix: String) -> Self { Self { suffix } } } impl Default for BPEDecoder { fn default() -> Self { Self::new("</w>".into()) } } impl Decoder for BPEDecoder { fn decode_chain(&self, tokens: Vec<String>) -> Result<Vec<String>> { let n = tokens.len().saturating_sub(1); Ok(tokens .into_iter() .enumerate() .map(|(i, token)| { let replacement = if i == n { "" } else { " " }; token.replace(&self.suffix, replacement) }) .collect()) } }
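This file ships without tests; a small sketch of the suffix handling for reference (assuming it sits in this module): the `</w>` marker becomes a space on every token except the last.

#[cfg(test)]
mod bpe_decoder_sketch {
    use super::*;

    #[test]
    fn suffix_becomes_space_except_on_the_last_token() {
        let decoder = BPEDecoder::default(); // suffix = "</w>"
        let tokens: Vec<String> = vec!["Hel", "lo</w>", "my</w>", "friend</w>"]
            .into_iter()
            .map(|s| s.to_string())
            .collect();
        // `decode` would then join these into "Hello my friend".
        assert_eq!(
            decoder.decode_chain(tokens).unwrap(),
            vec!["Hel", "lo ", "my ", "friend"]
        );
    }
}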
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/decoders/ctc.rs
use crate::decoders::wordpiece; use crate::tokenizer::{Decoder, Result}; use itertools::Itertools; use serde::{Deserialize, Serialize}; #[derive(Debug, Clone, Serialize, Deserialize)] /// The CTC (Connectionist Temporal Classification) decoder takes care /// of sanitizing a list of inputs token. /// Due to some alignement problem the output of some models can come /// with duplicated token. #[serde(tag = "type")] #[non_exhaustive] pub struct CTC { /// The pad token used by CTC to delimit a new token. pub pad_token: String, /// The word delimiter token. It will be replaced by a `<space>`. pub word_delimiter_token: String, /// Whether to cleanup some tokenization artifacts. /// Mainly spaces before punctuation, and some abbreviated english forms. pub cleanup: bool, } impl CTC { pub fn new(pad_token: String, word_delimiter_token: String, cleanup: bool) -> Self { Self { pad_token, word_delimiter_token, cleanup, } } } impl Default for CTC { fn default() -> Self { Self { pad_token: "<pad>".to_string(), word_delimiter_token: "|".to_string(), cleanup: true, } } } impl Decoder for CTC { fn decode_chain(&self, tokens: Vec<String>) -> Result<Vec<String>> { Ok(tokens .into_iter() .dedup() .filter_map(|token| { let mut replaced = token.replace(&self.pad_token, ""); if self.cleanup { replaced = wordpiece::cleanup(&replaced).replace(&self.word_delimiter_token, " "); } if replaced.is_empty() { None } else { Some(replaced) } }) .collect()) } } #[cfg(test)] mod tests { use super::*; #[test] fn handmade_sample() { let ctc_decoder = CTC::default(); let id_to_string_result = "<pad> <pad> h e e l l <pad> l o o o <pad>" .split(' ') .map(|s| s.to_string()) .collect(); assert_eq!( ctc_decoder.decode_chain(id_to_string_result).unwrap(), vec!["h", "e", "l", "l", "o"] ); } #[test] fn handmade_with_delimiter_sample() { let ctc_decoder = CTC::default(); let id_to_string_result = "<pad> <pad> h e e l l <pad> l o o o <pad> <pad> | <pad> w o o o r <pad> <pad> l l d <pad> <pad> <pad> <pad>" .split(' ') .map(|s| s.to_string()) .collect(); assert_eq!( ctc_decoder.decode_chain(id_to_string_result).unwrap(), vec!["h", "e", "l", "l", "o", " ", "w", "o", "r", "l", "d"] ); } #[test] fn librispeech_sample() { let ctc_decoder = CTC::default(); let id_to_string_result = "<pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> A | | <pad> M <pad> <pad> <pad> <pad> A <pad> <pad> N <pad> <pad> <pad> | | | <pad> <pad> <pad> <pad> S <pad> <pad> <pad> A I <pad> D D | | T T <pad> O <pad> | | T H E E | | | <pad> U U <pad> N N <pad> I <pad> <pad> V <pad> <pad> <pad> E R R <pad> <pad> <pad> S E E | | <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> S S <pad> <pad> <pad> <pad> I <pad> R R <pad> <pad> | | | <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> I <pad> <pad> <pad> | <pad> <pad> <pad> E X <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> I <pad> S <pad> <pad> T <pad> <pad> | | <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad>".split(' ').map(|s| s.to_string()).collect(); assert_eq!( ctc_decoder.decode_chain(id_to_string_result).unwrap(), vec![ "A", " ", 
"M", "A", "N", " ", "S", "A", "I", "D", " ", "T", "O", " ", "T", "H", "E", " ", "U", "N", "I", "V", "E", "R", "S", "E", " ", "S", "I", "R", " ", "I", " ", "E", "X", "I", "S", "T", " " ] ); } #[test] fn another_librispeech_sample() { let ctc_decoder = CTC::default(); let id_to_string_result = "<pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> H <pad> I <pad> S S | | <pad> <pad> <pad> I N <pad> <pad> S <pad> T T <pad> <pad> A N C C T <pad> | | | | | <pad> <pad> <pad> <pad> P <pad> <pad> <pad> <pad> A <pad> <pad> N N N <pad> <pad> I <pad> C <pad> <pad> | | <pad> W <pad> <pad> A S <pad> | | <pad> <pad> <pad> F <pad> <pad> O L <pad> <pad> L L O O W E E D | | <pad> B <pad> <pad> <pad> Y <pad> | | | A | | <pad> S S S <pad> M M <pad> <pad> <pad> A L L <pad> <pad> <pad> <pad> L <pad> | | | <pad> <pad> <pad> <pad> S H H <pad> <pad> <pad> <pad> A R R <pad> <pad> P <pad> <pad> | <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> B <pad> <pad> L L <pad> <pad> <pad> <pad> <pad> O W W <pad> <pad> | | | <pad> <pad> <pad> <pad> <pad> <pad> <pad> H <pad> <pad> <pad> <pad> <pad> <pad> <pad> I G H H | | <pad> <pad> O N <pad> | | H <pad> I S S | | <pad> <pad> C H H <pad> <pad> <pad> E <pad> S S <pad> T T <pad> <pad> | | | <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad>".split(' ').map(|s| s.to_string()).collect(); assert_eq!( ctc_decoder.decode_chain(id_to_string_result).unwrap(), vec![ "H", "I", "S", " ", "I", "N", "S", "T", "A", "N", "C", "T", " ", "P", "A", "N", "I", "C", " ", "W", "A", "S", " ", "F", "O", "L", "L", "O", "W", "E", "D", " ", "B", "Y", " ", "A", " ", "S", "M", "A", "L", "L", " ", "S", "H", "A", "R", "P", " ", "B", "L", "O", "W", " ", "H", "I", "G", "H", " ", "O", "N", " ", "H", "I", "S", " ", "C", "H", "E", "S", "T", " " ] ); } }
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/decoders/fuse.rs
use crate::tokenizer::{Decoder, Result}; use monostate::MustBe; use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, Serialize, Deserialize, Default)] /// Fuse simply fuses all tokens into one big string. /// It's usually the last decoding step anyway, but this /// decoder exists in case some decoders need to happen after that /// step. #[non_exhaustive] pub struct Fuse { #[serde(rename = "type")] type_: MustBe!("Fuse"), } impl Fuse { pub fn new() -> Self { Self { type_: MustBe!("Fuse"), } } } impl Decoder for Fuse { fn decode_chain(&self, tokens: Vec<String>) -> Result<Vec<String>> { let new_string = tokens.join(""); Ok(vec![new_string]) } } #[cfg(test)] mod tests { use super::*; #[test] fn decode() { let decoder = Fuse::new(); let res = decoder .decode_chain(vec!["Hey".into(), " friend!".into()]) .unwrap(); assert_eq!(res, vec!["Hey friend!"]); } }
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/decoders/strip.rs
use crate::tokenizer::{Decoder, Result}; use serde::{Deserialize, Serialize}; #[derive(Deserialize, Clone, Debug, Serialize, Default)] /// Strip removes up to `start` leading and up to `stop` trailing occurrences /// of the `content` character from each token. #[serde(tag = "type")] #[non_exhaustive] pub struct Strip { pub content: char, pub start: usize, pub stop: usize, } impl Strip { pub fn new(content: char, start: usize, stop: usize) -> Self { Self { content, start, stop, } } } impl Decoder for Strip { fn decode_chain(&self, tokens: Vec<String>) -> Result<Vec<String>> { Ok(tokens .into_iter() .map(|token| { let chars: Vec<char> = token.chars().collect(); let mut start_cut = 0; for (i, &c) in chars.iter().enumerate().take(self.start) { if c == self.content { start_cut = i + 1; continue; } else { break; } } let mut stop_cut = chars.len(); for i in 0..self.stop { let index = chars.len() - i - 1; if chars[index] == self.content { stop_cut = index; continue; } else { break; } } let new_token: String = chars[start_cut..stop_cut].iter().collect(); new_token }) .collect()) } } #[cfg(test)] mod tests { use super::*; #[test] fn decode() { let decoder = Strip::new('H', 1, 0); let res = decoder .decode_chain(vec!["Hey".into(), " friend!".into(), "HHH".into()]) .unwrap(); assert_eq!(res, vec!["ey", " friend!", "HH"]); let decoder = Strip::new('y', 0, 1); let res = decoder .decode_chain(vec!["Hey".into(), " friend!".into()]) .unwrap(); assert_eq!(res, vec!["He", " friend!"]); } }
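A minimal sketch of `Strip` used to drop a leading marker character from every token; the path `tokenizers::decoders::strip::Strip` is assumed and the `'▁'` marker is only an illustrative choice:

```
use tokenizers::decoders::strip::Strip;
use tokenizers::Decoder;

// Remove at most one leading '▁' and no trailing characters from each token.
let decoder = Strip::new('▁', 1, 0);
let out = decoder
    .decode_chain(vec!["▁Hello".to_string(), "▁there".to_string()])
    .unwrap();
assert_eq!(out, vec!["Hello", "there"]);
```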
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/processors/sequence.rs
use crate::processors::PostProcessorWrapper; use crate::tokenizer::{Encoding, PostProcessor, Result}; use crate::utils::macro_rules_attribute; use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, PartialEq, Eq)] #[macro_rules_attribute(impl_serde_type!)] pub struct Sequence { processors: Vec<PostProcessorWrapper>, } impl Sequence { pub fn new(processors: Vec<PostProcessorWrapper>) -> Self { Self { processors } } } impl PostProcessor for Sequence { fn added_tokens(&self, is_pair: bool) -> usize { self.processors .iter() .map(|p| p.added_tokens(is_pair)) .sum::<usize>() } fn process_encodings( &self, mut encodings: Vec<Encoding>, add_special_tokens: bool, ) -> Result<Vec<Encoding>> { for processor in &self.processors { encodings = processor.process_encodings(encodings, add_special_tokens)?; } Ok(encodings) } } #[cfg(test)] mod tests { use super::*; use crate::processors::{ByteLevel, PostProcessorWrapper}; use crate::tokenizer::{Encoding, PostProcessor}; use std::collections::HashMap; use std::iter::FromIterator; #[test] fn process_chain() { let start = Encoding::new( vec![0; 5], vec![0; 5], vec![ "Ġ".into(), "ĠĠĠĠHelloĠĠ".into(), "ĠĠHello".into(), "HelloĠĠ".into(), "ĠĠĠĠ".into(), ], vec![], vec![(0, 1), (0, 11), (11, 18), (18, 25), (25, 29)], vec![], vec![], vec![], HashMap::new(), ); let bytelevel = ByteLevel::default().trim_offsets(true); let sequence = Sequence::new(vec![PostProcessorWrapper::ByteLevel(bytelevel)]); let expected = Encoding::new( vec![0; 5], vec![0; 5], vec![ "Ġ".into(), "ĠĠĠĠHelloĠĠ".into(), "ĠĠHello".into(), "HelloĠĠ".into(), "ĠĠĠĠ".into(), ], vec![], vec![(0, 0), (4, 9), (13, 18), (18, 23), (29, 29)], vec![], vec![], vec![], HashMap::from_iter(vec![(0, 0..5)]), ); assert_eq!( expected, bytelevel.process(start.clone(), None, false).unwrap() ); assert_eq!( expected, sequence.process(start.clone(), None, false).unwrap() ); let pair_expected = Encoding::new( vec![0; 10], vec![0, 0, 0, 0, 0, 1, 1, 1, 1, 1], vec![ "Ġ".into(), "ĠĠĠĠHelloĠĠ".into(), "ĠĠHello".into(), "HelloĠĠ".into(), "ĠĠĠĠ".into(), "Ġ".into(), "ĠĠĠĠHelloĠĠ".into(), "ĠĠHello".into(), "HelloĠĠ".into(), "ĠĠĠĠ".into(), ], vec![], vec![ (0, 0), (4, 9), (13, 18), (18, 23), (29, 29), (0, 0), (4, 9), (13, 18), (18, 23), (29, 29), ], vec![], vec![], vec![], HashMap::from_iter(vec![(0, 0..5), (1, 5..10)]), ); assert_eq!( pair_expected, bytelevel .process(start.clone(), Some(start.clone()), false) .unwrap() ); assert_eq!( pair_expected, sequence.process(start.clone(), Some(start), false).unwrap() ); } }
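A small sketch showing how a post-processing `Sequence` is assembled from `PostProcessorWrapper` values; the `TemplateProcessing` used to fill it is just an example and the public module paths are assumptions:

```
use tokenizers::processors::sequence::Sequence;
use tokenizers::processors::{template::TemplateProcessing, PostProcessorWrapper};
use tokenizers::PostProcessor;

let template = TemplateProcessing::builder()
    .try_single("[CLS] $0 [SEP]")
    .unwrap()
    .special_tokens(vec![("[CLS]", 1), ("[SEP]", 0)])
    .build()
    .unwrap();
let seq = Sequence::new(vec![PostProcessorWrapper::Template(template)]);
// `added_tokens` is the sum over every wrapped processor: here [CLS] + [SEP].
assert_eq!(seq.added_tokens(false), 2);
```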
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/processors/mod.rs
pub mod bert; pub mod roberta; pub mod sequence; pub mod template; // Re-export these as processors pub use super::pre_tokenizers::byte_level; use serde::{Deserialize, Serialize}; use crate::pre_tokenizers::byte_level::ByteLevel; use crate::processors::bert::BertProcessing; use crate::processors::roberta::RobertaProcessing; use crate::processors::sequence::Sequence; use crate::processors::template::TemplateProcessing; use crate::{Encoding, PostProcessor, Result}; #[derive(Serialize, Deserialize, PartialEq, Debug, Clone, Eq)] #[serde(untagged)] pub enum PostProcessorWrapper { // Roberta must be before Bert for deserialization (serde does not validate tags) Roberta(RobertaProcessing), Bert(BertProcessing), ByteLevel(ByteLevel), Template(TemplateProcessing), Sequence(Sequence), } impl PostProcessor for PostProcessorWrapper { fn added_tokens(&self, is_pair: bool) -> usize { match self { Self::Bert(bert) => bert.added_tokens(is_pair), Self::ByteLevel(bl) => bl.added_tokens(is_pair), Self::Roberta(roberta) => roberta.added_tokens(is_pair), Self::Template(template) => template.added_tokens(is_pair), Self::Sequence(bl) => bl.added_tokens(is_pair), } } fn process_encodings( &self, encodings: Vec<Encoding>, add_special_tokens: bool, ) -> Result<Vec<Encoding>> { match self { Self::Bert(bert) => bert.process_encodings(encodings, add_special_tokens), Self::ByteLevel(bl) => bl.process_encodings(encodings, add_special_tokens), Self::Roberta(roberta) => roberta.process_encodings(encodings, add_special_tokens), Self::Template(template) => template.process_encodings(encodings, add_special_tokens), Self::Sequence(bl) => bl.process_encodings(encodings, add_special_tokens), } } } impl_enum_from!(BertProcessing, PostProcessorWrapper, Bert); impl_enum_from!(ByteLevel, PostProcessorWrapper, ByteLevel); impl_enum_from!(RobertaProcessing, PostProcessorWrapper, Roberta); impl_enum_from!(TemplateProcessing, PostProcessorWrapper, Template); impl_enum_from!(Sequence, PostProcessorWrapper, Sequence); #[cfg(test)] mod tests { use super::*; #[test] fn deserialize_bert_roberta_correctly() { let roberta = RobertaProcessing::default(); let roberta_r = r#"{ "type":"RobertaProcessing", "sep":["</s>",2], "cls":["<s>",0], "trim_offsets":true, "add_prefix_space":true }"# .replace(char::is_whitespace, ""); assert_eq!(serde_json::to_string(&roberta).unwrap(), roberta_r); assert_eq!( serde_json::from_str::<PostProcessorWrapper>(&roberta_r).unwrap(), PostProcessorWrapper::Roberta(roberta) ); let bert = BertProcessing::default(); let bert_r = r#"{"type":"BertProcessing","sep":["[SEP]",102],"cls":["[CLS]",101]}"#; assert_eq!(serde_json::to_string(&bert).unwrap(), bert_r); assert_eq!( serde_json::from_str::<PostProcessorWrapper>(bert_r).unwrap(), PostProcessorWrapper::Bert(bert) ); } }
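Because `PostProcessorWrapper` is `#[serde(untagged)]`, the inner `"type"` tag written by each processor decides which variant a JSON blob deserializes into. A quick sketch, assuming `serde_json` is available as in the tests above:

```
use tokenizers::processors::PostProcessorWrapper;

let json = r#"{"type":"BertProcessing","sep":["[SEP]",102],"cls":["[CLS]",101]}"#;
let processor: PostProcessorWrapper = serde_json::from_str(json).unwrap();
// The "type" field routes this to the Bert variant, not Roberta.
assert!(matches!(processor, PostProcessorWrapper::Bert(_)));
```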
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/processors/roberta.rs
use crate::processors::byte_level::process_offsets; use crate::tokenizer::{Encoding, PostProcessor, Result}; use serde::{Deserialize, Serialize}; use std::collections::HashMap; use std::iter::FromIterator; #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] #[serde(tag = "type")] pub struct RobertaProcessing { sep: (String, u32), cls: (String, u32), trim_offsets: bool, add_prefix_space: bool, } impl Default for RobertaProcessing { fn default() -> Self { Self { sep: ("</s>".into(), 2), cls: ("<s>".into(), 0), trim_offsets: true, add_prefix_space: true, } } } impl RobertaProcessing { pub fn new(sep: (String, u32), cls: (String, u32)) -> Self { Self { sep, cls, ..Default::default() } } #[must_use] pub fn trim_offsets(mut self, v: bool) -> Self { self.trim_offsets = v; self } #[must_use] pub fn add_prefix_space(mut self, v: bool) -> Self { self.add_prefix_space = v; self } } impl PostProcessor for RobertaProcessing { fn added_tokens(&self, is_pair: bool) -> usize { if is_pair { 4 } else { 2 } } fn process_encodings( &self, mut encodings: Vec<Encoding>, add_special_tokens: bool, ) -> Result<Vec<Encoding>> { if self.trim_offsets { for encoding in encodings.iter_mut() { process_offsets(encoding, self.add_prefix_space); encoding .get_overflowing_mut() .iter_mut() .for_each(|encoding| process_offsets(encoding, self.add_prefix_space)); } } // Roberta is weird, and every encoding is type_id=0. encodings .iter_mut() .for_each(|encoding| encoding.set_type_ids(vec![0; encoding.len()])); if !add_special_tokens { return Ok(encodings); } let encodings: Vec<Encoding> = encodings .iter_mut() .enumerate() .map(|(i, encoding)| { if i == 0 { let ids = [&[self.cls.1], encoding.get_ids(), &[self.sep.1]].concat(); let type_ids = [&[0], encoding.get_type_ids(), &[0]].concat(); let tokens = [ &[self.cls.0.clone()], encoding.get_tokens(), &[self.sep.0.clone()], ] .concat(); let words = [&[None], encoding.get_word_ids(), &[None]].concat(); let offsets = [&[(0, 0)], encoding.get_offsets(), &[(0, 0)]].concat(); let special_tokens = [&[1u32], &vec![0; encoding.get_ids().len()][..], &[1]].concat(); let attention_mask = vec![1; ids.len()]; // For compatibility with `TemplateProcessing`, the sequence_ranges shouldn't contain // the special tokens. let sequence_ranges = HashMap::from_iter(vec![(0, 1..ids.len() - 1)]); Encoding::new( ids, type_ids, tokens, words, offsets, special_tokens, attention_mask, encoding .take_overflowing() .into_iter() .map(|encoding| { let ids = [&[self.cls.1], encoding.get_ids(), &[self.sep.1]].concat(); let type_ids = vec![0; encoding.get_ids().len() + 2]; let tokens = [ &[self.cls.0.clone()], encoding.get_tokens(), &[self.sep.0.clone()], ] .concat(); let words = [&[None], encoding.get_word_ids(), &[None]].concat(); let offsets = [&[(0, 0)], encoding.get_offsets(), &[(0, 0)]].concat(); let special_tokens = [&[1u32], &vec![0; encoding.get_ids().len()][..], &[1]] .concat(); let attention_mask = vec![1; ids.len()]; // For compatibility with `TemplateProcessing`, the sequence_ranges shouldn't // contain the special tokens. 
let sequence_ranges = HashMap::from_iter(vec![(0, 1..ids.len() - 1)]); Encoding::new( ids, type_ids, tokens, words, offsets, special_tokens, attention_mask, vec![], sequence_ranges, ) }) .collect(), sequence_ranges, ) } else { let pair_ids = [&[self.sep.1], encoding.get_ids(), &[self.sep.1]].concat(); let pair_type_ids = vec![0; encoding.get_ids().len() + 2]; let pair_tokens = [ &[self.sep.0.clone()], encoding.get_tokens(), &[self.sep.0.clone()], ] .concat(); let pair_words = [&[None], encoding.get_word_ids(), &[None]].concat(); let pair_offsets = [&[(0, 0)], encoding.get_offsets(), &[(0, 0)]].concat(); let pair_special_tokens = [&[1], &vec![0u32; encoding.get_type_ids().len()][..], &[1]].concat(); let pair_attention_mask = vec![1; pair_ids.len()]; // For compatibility with `TemplateProcessing`, the sequence_ranges shouldn't contain // the special tokens. let pair_sequence_ranges = HashMap::from_iter(vec![(1, 1..pair_ids.len() - 1)]); Encoding::new( pair_ids, pair_type_ids, pair_tokens, pair_words, pair_offsets, pair_special_tokens, pair_attention_mask, encoding .take_overflowing() .into_iter() .map(|encoding| { let pair_ids = [&[self.sep.1], encoding.get_ids(), &[self.sep.1]].concat(); let pair_type_ids = vec![0; encoding.get_ids().len() + 2]; let pair_tokens = [ &[self.sep.0.clone()], encoding.get_tokens(), &[self.sep.0.clone()], ] .concat(); let pair_words = [&[None], encoding.get_word_ids(), &[None]].concat(); let pair_offsets = [&[(0, 0)], encoding.get_offsets(), &[(0, 0)]].concat(); let pair_special_tokens = [&[1], &vec![0u32; encoding.get_type_ids().len()][..], &[1]] .concat(); let pair_attention_mask = vec![1; pair_ids.len()]; // For compatibility with `TemplateProcessing`, the sequence_ranges // shouldn't contain the special tokens. let pair_sequence_ranges = HashMap::from_iter(vec![(1, 1..pair_ids.len() - 1)]); Encoding::new( pair_ids, pair_type_ids, pair_tokens, pair_words, pair_offsets, pair_special_tokens, pair_attention_mask, vec![], pair_sequence_ranges, ) }) .collect(), pair_sequence_ranges, ) } }) .collect(); Ok(encodings) } } #[cfg(test)] mod tests { use super::*; #[test] fn serde() { let roberta = RobertaProcessing::default(); let roberta_r = r#"{ "type":"RobertaProcessing", "sep":["</s>",2], "cls":["<s>",0], "trim_offsets":true, "add_prefix_space":true }"# .replace(char::is_whitespace, ""); assert_eq!(serde_json::to_string(&roberta).unwrap(), roberta_r); assert_eq!( serde_json::from_str::<RobertaProcessing>(&roberta_r).unwrap(), roberta ); } #[test] fn roberta_processing() { let processor = RobertaProcessing::default(); assert_eq!(processor.added_tokens(false), 2); assert_eq!(processor.added_tokens(true), 4); use crate::Token; let encoding = Encoding::from_tokens( vec![ Token::new(12, "Hello".into(), (0, 5)), Token::new(14, "there".into(), (6, 11)), ], 0, ); let pair = Encoding::from_tokens(vec![Token::new(15, "pair".into(), (0, 4))], 0); let single_encoding = processor.process(encoding.clone(), None, true).unwrap(); assert_eq!( single_encoding, Encoding::new( vec![0, 12, 14, 2], vec![0, 0, 0, 0], vec!["<s>".into(), "Hello".into(), "there".into(), "</s>".into()], vec![None, None, None, None], vec![(0, 0), (0, 5), (6, 11), (0, 0)], vec![1, 0, 0, 1], vec![1, 1, 1, 1], vec![], HashMap::from_iter(vec![(0, 1..3)]), ) ); assert_eq!(single_encoding.token_to_sequence(2), Some(0)); assert_eq!(single_encoding.token_to_sequence(3), None); let pair_encoding = processor .process(encoding.clone(), Some(pair.clone()), true) .unwrap(); assert_eq!( pair_encoding, Encoding::new( vec![0, 
12, 14, 2, 2, 15, 2], vec![0, 0, 0, 0, 0, 0, 0], vec![ "<s>".into(), "Hello".into(), "there".into(), "</s>".into(), "</s>".into(), "pair".into(), "</s>".into() ], vec![None, None, None, None, None, None, None], vec![(0, 0), (0, 5), (6, 11), (0, 0), (0, 0), (0, 4), (0, 0)], vec![1, 0, 0, 1, 1, 0, 1], vec![1, 1, 1, 1, 1, 1, 1], vec![], HashMap::from_iter(vec![(0, 1..3), (1, 5..6)]), ) ); assert_eq!(pair_encoding.token_to_sequence(2), Some(0)); assert_eq!(pair_encoding.token_to_sequence(3), None); assert_eq!(pair_encoding.token_to_sequence(4), None); assert_eq!(pair_encoding.token_to_sequence(5), Some(1)); assert_eq!(pair_encoding.token_to_sequence(6), None); // No special tokens let pair_encoding = processor.process(encoding, Some(pair), false).unwrap(); assert_eq!( pair_encoding, Encoding::new( vec![12, 14, 15], vec![0, 0, 0], vec!["Hello".into(), "there".into(), "pair".into(),], vec![None, None, None], vec![(0, 5), (6, 11), (0, 4)], vec![0, 0, 0], vec![1, 1, 1], vec![], HashMap::from_iter(vec![(0, 0..2), (1, 2..3)]), ) ); assert_eq!(pair_encoding.token_to_sequence(0), Some(0)); assert_eq!(pair_encoding.token_to_sequence(1), Some(0)); assert_eq!(pair_encoding.token_to_sequence(2), Some(1)); } }
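A minimal end-to-end sketch of `RobertaProcessing` on a single made-up encoding; the crate-root re-exports of `Encoding`, `Token` and `PostProcessor` are assumed:

```
use tokenizers::processors::roberta::RobertaProcessing;
use tokenizers::{Encoding, PostProcessor, Token};

let processor = RobertaProcessing::default();
let encoding = Encoding::from_tokens(vec![Token::new(7, "hello".into(), (0, 5))], 0);
let processed = processor.process(encoding, None, true).unwrap();
// The single sequence is wrapped as <s> ... </s> with the default ids 0 and 2.
assert_eq!(
    processed.get_tokens(),
    &["<s>".to_string(), "hello".to_string(), "</s>".to_string()]
);
assert_eq!(processed.get_ids(), &[0u32, 7, 2]);
```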
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/processors/template.rs
//! # Template Processing //! //! Provides a way to specify templates in order to add the special tokens to each //! input sequence as relevant. //! //! ## Example //! //! Let's take `BERT` tokenizer as an example. It uses two special tokens, used to //! delimitate each sequence. `[CLS]` is always used at the beginning of the first //! sequence, and `[SEP]` is added at the end of both the first, and the pair //! sequences. The final result looks like this: //! - Single sequence: `[CLS] Hello there [SEP]` //! - Pair sequences: `[CLS] My name is Anthony [SEP] What is my name? [SEP]` //! With the type ids as following: //! ```markdown //! [CLS] ... [SEP] ... [SEP] //! 0 0 0 1 1 //! ``` //! //! So, we can define a [`TemplateProcessing`] that will achieve this result: //! ``` //! # use tokenizers::processors::template::TemplateProcessing; //! let template = TemplateProcessing::builder() //! // The template when we only have a single sequence: //! .try_single(vec!["[CLS]", "$0", "[SEP]"]).unwrap() //! // Same as: //! .try_single("[CLS] $0 [SEP]").unwrap() //! //! // The template when we have both sequences: //! .try_pair(vec!["[CLS]:0", "$A:0", "[SEP]:0", "$B:1", "[SEP]:1"]).unwrap() //! // Same as: //! .try_pair("[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1").unwrap() //! // Or: //! .try_pair("[CLS] $0 [SEP] $B:1 [SEP]:1").unwrap() //! //! // The list of special tokens used by each sequences //! .special_tokens(vec![("[CLS]", 1), ("[SEP]", 0)]) //! .build() //! .unwrap(); //! ``` //! //! In this example, each input sequence is identified using a `$` construct. This identifier //! lets us specify each input sequence, and the type_id to use. When nothing is specified, //! it uses the default values. Here are the different ways to specify it: //! - Specifying the sequence, with default `type_id == 0`: `$A` or `$B` //! - Specifying the `type_id` with default `sequence == A`: `$0`, `$1`, `$2`, ... //! - Specifying both: `$A:0`, `$B:1`, ... //! //! The same construct is used for special tokens: `<identifier>(:<type_id>)?`. //! //! **Warning**: You must ensure that you are giving the correct tokens/ids as these will //! be added to the `Encoding` without any further check. If the given ids correspond to //! something totally different in a `Tokenizer` using this `PostProcessor`, it might lead //! to unexpected results. //! //! [`TemplateProcessing`]: struct.TemplateProcessing.html //! use crate::{Encoding, PostProcessor, Result}; use itertools::Itertools; use serde::{Deserialize, Serialize}; use std::collections::{HashMap, HashSet}; use std::convert::{TryFrom, TryInto}; use std::result::Result as StdResult; /// Represents any sequences received as input of the PostProcessor #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Eq)] pub enum Sequence { /// This is the first sequence, the one that is always specified A, /// This is the pair sequence, that is optional B, } /// Represents the different kind of pieces that constitute a template. /// It can be either the input sequence or a [`SpecialToken`]: /// /// - The `Sequence` has an associated `type_id` which is used by default /// for any token inside this sequence. The `Sequence` corresponds to one /// of the input sequence given as input of the `PostProcessor`. /// /// - The `SpecialToken` has an associated `id`. It corresponds to a [`SpecialToken`]. 
/// /// The easiest way to build a `Piece` is actually by converting it from a string: /// ``` /// # use tokenizers::processors::template::Piece; /// # use std::convert::TryFrom; /// let sequence_with_type_id_0 = Piece::try_from("$0").unwrap(); /// let sequence_with_type_id_1 = Piece::try_from("$1").unwrap(); /// let special_token_cls = Piece::try_from("[CLS]").unwrap(); /// ``` /// /// [`SpecialToken`]: struct.SpecialToken.html /// #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Eq)] pub enum Piece { Sequence { id: Sequence, type_id: u32 }, SpecialToken { id: String, type_id: u32 }, } impl Piece { fn extract_id(s: &str) -> Option<Self> { if s.starts_with('$') { let rest = &s['$'.len_utf8()..]; // If the id is just `$`, we use 0 as type_id, and Sequence A match rest { "" => Some(Self::Sequence { id: Sequence::A, type_id: 0, }), "A" | "a" => Some(Self::Sequence { id: Sequence::A, type_id: 0, }), "B" | "b" => Some(Self::Sequence { id: Sequence::B, type_id: 0, }), n => { if let Ok(type_id) = n.parse::<u32>() { Some(Self::Sequence { id: Sequence::A, type_id, }) } else { None } } } } else { Some(Self::SpecialToken { id: s.to_owned(), type_id: 0, }) } } fn with_type_id(self, type_id: u32) -> Self { match self { Self::Sequence { id, .. } => Self::Sequence { id, type_id }, Self::SpecialToken { id, .. } => Self::SpecialToken { id, type_id }, } } } impl TryFrom<String> for Piece { type Error = String; fn try_from(s: String) -> StdResult<Self, Self::Error> { let parts = s.split(':').collect::<Vec<_>>(); let err = || format!("Cannot build Piece from string \"{}\"", s); match parts.as_slice() { [id, type_id] => { let type_id: u32 = type_id.parse().map_err(|_| err())?; let piece = Self::extract_id(id).ok_or_else(err)?; Ok(piece.with_type_id(type_id)) } [id] => Self::extract_id(id).ok_or_else(err), _ => Err(err()), } } } impl TryFrom<&str> for Piece { type Error = String; fn try_from(s: &str) -> StdResult<Self, Self::Error> { Piece::try_from(s.to_owned()) } } /// Represents a bunch of tokens to be used in a template. /// Usually, special tokens have only one associated id/token but in /// some cases, it might be interesting to have multiple ids/tokens. 
/// /// # Examples /// ``` /// # use tokenizers::processors::template::SpecialToken; /// // Simple cases, where a single id/token is necessary: /// let cls = SpecialToken::from(("[CLS]", 1)); /// let sep = SpecialToken::from((0, "[SEP]")); // The order in the tuple is not important /// /// // More complex case with multiple values: /// let complex = SpecialToken::new( /// "A complex special token:".into(), /// vec![0, 1, 2, 3, 4], /// vec!["A".into(), "complex".into(), "special".into(), "token".into(), ":".into()] /// ).unwrap(); /// ``` #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Eq)] pub struct SpecialToken { /// A unique id used to identify this SpecialToken in the template id: String, /// The list of associated ids ids: Vec<u32>, /// The list of associated tokens tokens: Vec<String>, } impl From<(String, u32)> for SpecialToken { fn from(v: (String, u32)) -> Self { Self { id: v.0.clone(), ids: vec![v.1], tokens: vec![v.0], } } } impl From<(&str, u32)> for SpecialToken { fn from(v: (&str, u32)) -> Self { Self::from((v.0.to_owned(), v.1)) } } impl From<(u32, String)> for SpecialToken { fn from(v: (u32, String)) -> Self { Self::from((v.1, v.0)) } } impl From<(u32, &str)> for SpecialToken { fn from(v: (u32, &str)) -> Self { Self::from((v.1.to_owned(), v.0)) } } impl SpecialToken { pub fn new(id: String, ids: Vec<u32>, tokens: Vec<String>) -> Result<Self> { if ids.len() != tokens.len() { Err("SpecialToken: ids and tokens must be of the same length".into()) } else { Ok(Self { id, ids, tokens }) } } } /// A Template represents a Vec<[`Piece`]>. /// /// We can easily build one as follows /// ``` /// # use tokenizers::processors::template::Template; /// # use std::convert::TryFrom; /// // By providing a `String` or `&str`, we just split on whitespaces: /// let template = Template::try_from("[CLS] $0 [SEP]").unwrap(); /// /// // By providing pieces directly: /// let template = Template::try_from(vec!["[CLS]", "$0", "[SEP]"]).unwrap(); /// ``` /// Both of these methods give the same result. /// /// [`Piece`]: enum.Piece.html /// #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Eq)] #[serde(transparent)] pub struct Template(Vec<Piece>); impl<T> TryFrom<Vec<T>> for Template where T: TryInto<Piece, Error = String>, { type Error = String; fn try_from(v: Vec<T>) -> StdResult<Self, Self::Error> { Ok(Self( v.into_iter() .map(|p| p.try_into()) .collect::<StdResult<Vec<_>, Self::Error>>()?, )) } } impl TryFrom<String> for Template { type Error = String; fn try_from(s: String) -> StdResult<Self, Self::Error> { Self::try_from(s.as_ref()) } } impl TryFrom<&str> for Template { type Error = String; fn try_from(s: &str) -> StdResult<Self, Self::Error> { Self::try_from(s.split(' ').collect::<Vec<_>>()) } } /// A bunch of [`SpecialToken`] represented by their ID. /// Internally, `Tokens` is a `HashMap<String, SpecialToken>` and can be built /// from a HashMap or a Vec<[`SpecialToken`]>. 
/// /// [`SpecialToken`]: struct.SpecialToken.html #[derive(Debug, Clone, PartialEq, Default, Serialize, Deserialize, Eq)] #[serde(transparent)] pub struct Tokens( #[serde(serialize_with = "crate::utils::ordered_map")] pub HashMap<String, SpecialToken>, ); impl<T: Into<SpecialToken>> From<Vec<T>> for Tokens { fn from(v: Vec<T>) -> Self { Self( v.into_iter() .map(|t| { let token: SpecialToken = t.into(); (token.id.clone(), token) }) .collect(), ) } } impl From<HashMap<String, SpecialToken>> for Tokens { fn from(v: HashMap<String, SpecialToken>) -> Self { Self(v) } } /// This PostProcessor takes care of processing each input `Encoding` by applying /// the corresponding template, before merging them in the final Encoding. /// /// A `Template` is actually a sequence of `Piece` that will be /// concatenated together in the given order. Each `Piece` represents either /// one of the input `Encoding` or a `SpecialToken`. /// /// ## Example /// ``` /// # use tokenizers::processors::template::TemplateProcessing; /// let template = TemplateProcessing::builder() /// .try_single("[CLS] $A [SEP]").unwrap() /// .try_pair("[CLS] $A [SEP] $B:1 [SEP]:1").unwrap() /// .special_tokens(vec![("[CLS]", 1), ("[SEP]", 0)]) /// .build() /// .unwrap(); /// ``` /// #[derive(Debug, Clone, PartialEq, Builder, Serialize, Deserialize, Eq)] #[serde(tag = "type", from = "TemplateProcessingDeserializer")] #[builder(build_fn(validate = "Self::validate"))] pub struct TemplateProcessing { #[builder(try_setter, default = "\"$0\".try_into().unwrap()")] single: Template, #[builder(try_setter, default = "\"$A:0 $B:1\".try_into().unwrap()")] pair: Template, #[builder(setter(skip), default = "self.default_added(true)")] #[serde(skip)] added_single: usize, #[builder(setter(skip), default = "self.default_added(false)")] #[serde(skip)] added_pair: usize, #[builder(setter(into), default)] special_tokens: Tokens, } impl From<&str> for TemplateProcessingBuilderError { fn from(e: &str) -> Self { e.to_string().into() } } impl PartialEq for TemplateProcessingBuilderError { fn eq(&self, other: &Self) -> bool { self.to_string() == other.to_string() } } /// We use this custom deserializer to provided the values for `added_single` /// and `added_pair` during deserialization, while not having to serialize them #[doc(hidden)] #[derive(Deserialize)] #[serde(tag = "type")] struct TemplateProcessingDeserializer { single: Template, pair: Template, special_tokens: Tokens, } impl From<TemplateProcessingDeserializer> for TemplateProcessing { fn from(t: TemplateProcessingDeserializer) -> Self { let added_single = count_added(&t.single, Some(&t.special_tokens)); let added_pair = count_added(&t.pair, Some(&t.special_tokens)); Self { single: t.single, pair: t.pair, added_single, added_pair, special_tokens: t.special_tokens, } } } /// Count the number of added tokens in the given template fn count_added(container: &Template, special_tokens: Option<&Tokens>) -> usize { container .0 .iter() .map(|p| match p { Piece::Sequence { .. } => 0, Piece::SpecialToken { id, .. 
} => { special_tokens.map_or(0, |spt| spt.0.get(id).map_or(0, |s| s.ids.len())) } }) .sum() } impl TemplateProcessingBuilder { fn default_added(&self, is_single: bool) -> usize { let container = if is_single { self.single.as_ref() } else { self.pair.as_ref() }; container.map_or(0, |pieces| { count_added(pieces, self.special_tokens.as_ref()) }) } fn validate(&self) -> std::result::Result<(), String> { let pair_has_both = self.pair.as_ref().map_or(true, |pair| { let mut has_a = false; let mut has_b = false; for piece in &pair.0 { if let Piece::Sequence { id: Sequence::A, .. } = piece { has_a = true; } if let Piece::Sequence { id: Sequence::B, .. } = piece { has_b = true; } } has_a && has_b }); if !pair_has_both { return Err("Template for `pair` must use both sequences".into()); } let check = |sp| { let exist = self .special_tokens .as_ref() .map_or(false, |map| map.0.contains_key(sp)); match exist { false => Some(sp), true => None, } }; let empty = vec![]; let missing: HashSet<&str> = self .single .as_ref() .map_or(empty.iter(), |s| s.0.iter()) .chain(self.pair.as_ref().map_or(empty.iter(), |s| s.0.iter())) .filter_map(|piece| match piece { Piece::Sequence { .. } => None, Piece::SpecialToken { id, .. } => check(id.as_ref()), }) .collect::<HashSet<_>>(); if missing.is_empty() { Ok(()) } else { Err(format!( "Missing SpecialToken(s) with id(s) `{}`", missing.iter().join(", ") )) } } } impl Default for TemplateProcessing { fn default() -> Self { Self { single: "$0".try_into().unwrap(), pair: "$1".try_into().unwrap(), added_single: 0, added_pair: 0, special_tokens: Tokens::default(), } } } impl TemplateProcessing { pub fn builder() -> TemplateProcessingBuilder { TemplateProcessingBuilder::default() } fn apply_template( &self, template: &[Piece], mut encodings: Vec<Encoding>, add_special_tokens: bool, ) -> Result<Vec<Encoding>> { let final_encodings: Vec<Encoding> = template .iter() .flat_map(|piece| { match piece { Piece::Sequence { id, type_id } => { let i = usize::from(*id != Sequence::A); let encoding = &mut encodings[i]; encoding.set_type_ids(vec![*type_id; encoding.len()]); encoding.set_sequence_id(i); Some(encoding.clone()) } Piece::SpecialToken { id, type_id } => { if add_special_tokens { let tok = &self.special_tokens.0[id]; // We already checked existance above let len = tok.ids.len(); let encoding = Encoding::new( tok.ids.clone(), std::iter::repeat(*type_id).take(len).collect(), tok.tokens.clone(), // words std::iter::repeat(None).take(len).collect(), // offsets std::iter::repeat((0, 0)).take(len).collect(), // special_tokens_mask std::iter::repeat(1).take(len).collect(), // attention_mask std::iter::repeat(1).take(len).collect(), // overflowing vec![], // sequence_range HashMap::new(), ); Some(encoding) } else { None } } } }) .collect(); //let mut pair = if encodings.len() > 1 { // Some(encodings.pop().unwrap()) //} else { // None //}; //let mut encoding = encodings.pop().unwrap(); //let pair_overflowing = pair.as_mut().map_or(vec![], |e| e.take_overflowing()); //let mut overflowing: Vec<Encoding> = encoding // .take_overflowing() // .iter() // .map(|encoding| -> Result<Vec<Encoding>> { // // 1. The pair itself // let mut overflowings = self.apply_template( // template, // if encodings.len() > 1 { // vec![encoding.clone(), encodings[1].clone()] // } else { // vec![encoding.clone()] // }, // add_special_tokens, // )?; // // 2. 
Its overflowings // for other_o in &pair_overflowing { // overflowings.extend(self.apply_template( // template, // vec![encoding.clone(), other_o.clone()], // add_special_tokens, // )?); // } // Ok(overflowings) // }) // .collect::<Result<Vec<Vec<Encoding>>>>()? // .into_iter() // .flatten() // .collect(); //// We also need to combine the first sequence with all other overflowings //overflowing.extend( // pair_overflowing // .into_iter() // .map(|pair| { // self.apply_template(template, vec![encoding.clone(), pair], add_special_tokens) // }) // .collect::<Result<Vec<_>>>()? // .into_iter() // .flatten(), //); Ok(final_encodings) } } impl PostProcessor for TemplateProcessing { fn added_tokens(&self, is_pair: bool) -> usize { if is_pair { self.added_pair } else { self.added_single } } fn process_encodings( &self, encodings: Vec<Encoding>, add_special_tokens: bool, ) -> Result<Vec<Encoding>> { // let (encoding, pair): (Encoding, Option<Encoding>) = match encodings.len() { // 1 => ( // encodings // .pop() // .ok_or(ProcessorError::InvalidEncodingsVecLength)?, // None, // ), // 2 => { // let pair = encodings // .pop() // .ok_or(ProcessorError::InvalidEncodingsVecLength)?; // let encoding = encodings // .pop() // .ok_or(ProcessorError::InvalidEncodingsVecLength)?; // (encoding, Some(pair)) // } // _ => return Err(Box::new(ProcessorError::InvalidEncodingsVecLength)), // }; let template = match encodings.len() { 2 => &self.pair.0, 1 => &self.single.0, _ => todo!(), }; let encodings = self.apply_template(template, encodings, add_special_tokens)?; Ok(encodings) } } #[cfg(test)] mod tests { use super::*; use std::convert::TryInto; use std::iter::FromIterator; #[test] fn piece_serde() { let seq_0 = Piece::Sequence { id: Sequence::A, type_id: 0, }; let seq_0_s = r#"{"Sequence":{"id":"A","type_id":0}}"#; assert_eq!(serde_json::to_string(&seq_0).unwrap(), seq_0_s); assert_eq!(serde_json::from_str::<Piece>(seq_0_s).unwrap(), seq_0); let seq_1 = Piece::Sequence { id: Sequence::B, type_id: 1, }; let seq_1_s = r#"{"Sequence":{"id":"B","type_id":1}}"#; assert_eq!(serde_json::to_string(&seq_1).unwrap(), seq_1_s); assert_eq!(serde_json::from_str::<Piece>(seq_1_s).unwrap(), seq_1); let spe = Piece::SpecialToken { id: "[CLS]".into(), type_id: 0, }; let spe_s = r#"{"SpecialToken":{"id":"[CLS]","type_id":0}}"#; assert_eq!(serde_json::to_string(&spe).unwrap(), spe_s); assert_eq!(serde_json::from_str::<Piece>(spe_s).unwrap(), spe); } #[test] fn piece() { assert_eq!( Ok(Piece::Sequence { id: Sequence::A, type_id: 0 }), "$".try_into() ); assert_eq!( Ok(Piece::Sequence { id: Sequence::B, type_id: 0 }), "$B".try_into() ); assert_eq!( Ok(Piece::Sequence { id: Sequence::A, type_id: 1 }), "$1".try_into() ); assert_eq!( Ok(Piece::Sequence { id: Sequence::B, type_id: 2 }), "$B:2".try_into() ); assert_eq!( Ok(Piece::Sequence { id: Sequence::A, type_id: 1 }), "$:1".try_into() ); assert!(Piece::try_from("$C:1").is_err()); assert!(Piece::try_from("$A:").is_err()); } #[test] fn special_token_serde() { let simple = SpecialToken::from(("[CLS]", 0)); let simple_s = r#"{"id":"[CLS]","ids":[0],"tokens":["[CLS]"]}"#; assert_eq!(serde_json::to_string(&simple).unwrap(), simple_s); assert_eq!( serde_json::from_str::<SpecialToken>(simple_s).unwrap(), simple ); let complete = SpecialToken::new( "[2FR]".into(), vec![1, 2, 3], vec!["convert".into(), "to".into(), "FR".into()], ) .unwrap(); let complete_s = r#"{"id":"[2FR]","ids":[1,2,3],"tokens":["convert","to","FR"]}"#; assert_eq!(serde_json::to_string(&complete).unwrap(), complete_s); 
assert_eq!( serde_json::from_str::<SpecialToken>(complete_s).unwrap(), complete ); let malformed = SpecialToken::new( "[2FR]".into(), vec![1, 2], vec!["convert".into(), "to".into(), "FR".into()], ); assert!(malformed.is_err()); let malformed = SpecialToken::new( "[2FR]".into(), vec![1, 2, 3], vec!["convert".into(), "FR".into()], ); assert!(malformed.is_err()); } #[test] fn template_serde() { let template = Template(vec![ Piece::Sequence { id: Sequence::A, type_id: 0, }, Piece::SpecialToken { id: "[CLS]".into(), type_id: 0, }, ]); let template_s = r#"[{"Sequence":{"id":"A","type_id":0}},{"SpecialToken":{"id":"[CLS]","type_id":0}}]"#; assert_eq!(serde_json::to_string(&template).unwrap(), template_s); assert_eq!( serde_json::from_str::<Template>(template_s).unwrap(), template ); } #[test] fn tokens_serde() { let tokens = Tokens::from(vec![("[CLS]", 1), ("[SEP]", 0)]); let tokens_s = r#"{"[CLS]":{"id":"[CLS]","ids":[1],"tokens":["[CLS]"]},"[SEP]":{"id":"[SEP]","ids":[0],"tokens":["[SEP]"]}}"#; let tokens_ser = serde_json::to_string(&tokens).unwrap(); assert_eq!(tokens_ser, tokens_s); assert_eq!(serde_json::from_str::<Tokens>(tokens_s).unwrap(), tokens); } fn get_bert_template() -> TemplateProcessing { TemplateProcessing::builder() .try_single(vec!["[CLS]", "$0", "[SEP]"]) .unwrap() .try_pair("[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1") .unwrap() .special_tokens(vec![("[CLS]", 1), ("[SEP]", 0)]) .build() .unwrap() } #[test] fn template_processing_serde() { let template = tests::get_bert_template(); let template_s = "{\ \"type\":\"TemplateProcessing\",\ \"single\":[\ {\"SpecialToken\":{\"id\":\"[CLS]\",\"type_id\":0}},\ {\"Sequence\":{\"id\":\"A\",\"type_id\":0}},\ {\"SpecialToken\":{\"id\":\"[SEP]\",\"type_id\":0}}\ ],\ \"pair\":[\ {\"SpecialToken\":{\"id\":\"[CLS]\",\"type_id\":0}},\ {\"Sequence\":{\"id\":\"A\",\"type_id\":0}},\ {\"SpecialToken\":{\"id\":\"[SEP]\",\"type_id\":0}},\ {\"Sequence\":{\"id\":\"B\",\"type_id\":1}},\ {\"SpecialToken\":{\"id\":\"[SEP]\",\"type_id\":1}}\ ],\ \"special_tokens\":{\ \"[CLS]\":{\ \"id\":\"[CLS]\",\"ids\":[1],\"tokens\":[\"[CLS]\"]\ },\ \"[SEP]\":{\ \"id\":\"[SEP]\",\"ids\":[0],\"tokens\":[\"[SEP]\"]\ }\ }}"; let template_ser = serde_json::to_string(&template).unwrap(); assert_eq!(template_ser, template_s); assert_eq!( serde_json::from_str::<TemplateProcessing>(template_s).unwrap(), template ); } #[test] fn missing_special_tokens() { let processor = TemplateProcessing::builder() .try_single("[CLS] $0 [SEP]") .unwrap() .try_pair("[CLS] $A:0 [SEP] $B:1 [SEP]") .unwrap() .build(); let err_a = Err("Missing SpecialToken(s) with id(s) `[SEP], [CLS]`".into()); let err_b = Err("Missing SpecialToken(s) with id(s) `[CLS], [SEP]`".into()); assert!(processor == err_a || processor == err_b); } #[test] fn template_processing() { let processor = tests::get_bert_template(); assert_eq!(processor.added_tokens(false), 2); assert_eq!(processor.added_tokens(true), 3); use crate::Token; let encoding = Encoding::from_tokens( vec![ Token::new(12, "Hello".into(), (0, 5)), Token::new(14, "there".into(), (6, 11)), ], 0, ); let pair = Encoding::from_tokens(vec![Token::new(15, "pair".into(), (0, 4))], 0); let single_encoding = processor.process(encoding.clone(), None, true).unwrap(); assert_eq!( single_encoding, Encoding::new( vec![1, 12, 14, 0], vec![0, 0, 0, 0], vec![ "[CLS]".into(), "Hello".into(), "there".into(), "[SEP]".into() ], vec![None, None, None, None], vec![(0, 0), (0, 5), (6, 11), (0, 0)], vec![1, 0, 0, 1], vec![1, 1, 1, 1], vec![], HashMap::from_iter(vec![(0, 1..3)]), ) ); 
assert_eq!(single_encoding.token_to_sequence(2), Some(0)); assert_eq!(single_encoding.token_to_sequence(3), None); let pair_encoding = processor.process(encoding, Some(pair), true).unwrap(); assert_eq!( pair_encoding, Encoding::new( vec![1, 12, 14, 0, 15, 0], vec![0, 0, 0, 0, 1, 1], vec![ "[CLS]".into(), "Hello".into(), "there".into(), "[SEP]".into(), "pair".into(), "[SEP]".into() ], vec![None, None, None, None, None, None], vec![(0, 0), (0, 5), (6, 11), (0, 0), (0, 4), (0, 0)], vec![1, 0, 0, 1, 0, 1], vec![1, 1, 1, 1, 1, 1], vec![], HashMap::from_iter(vec![(0, 1..3), (1, 4..5)]), ) ); assert_eq!(pair_encoding.token_to_sequence(2), Some(0)); assert_eq!(pair_encoding.token_to_sequence(3), None); assert_eq!(pair_encoding.token_to_sequence(4), Some(1)); assert_eq!(pair_encoding.token_to_sequence(5), None); } #[test] fn template_processing_overflowing() { let processor = tests::get_bert_template(); assert_eq!(processor.added_tokens(false), 2); assert_eq!(processor.added_tokens(true), 3); use crate::Token; let mut encoding = Encoding::from_tokens( vec![ Token::new(12, "Hello".into(), (0, 5)), Token::new(14, "there".into(), (6, 11)), ], 0, ); let overflowing = Encoding::from_tokens(vec![Token::new(13, "you".into(), (12, 15))], 0); encoding.set_overflowing(vec![overflowing]); let mut pair = Encoding::from_tokens( vec![ Token::new(15, "pair".into(), (0, 4)), Token::new(16, "with".into(), (5, 9)), ], 0, ); let pair_overflowing = Encoding::from_tokens(vec![Token::new(17, "info".into(), (10, 14))], 0); pair.set_overflowing(vec![pair_overflowing]); let single_encoding = processor.process(encoding.clone(), None, true).unwrap(); assert_eq!( single_encoding, Encoding::new( vec![1, 12, 14, 0], vec![0, 0, 0, 0], vec![ "[CLS]".into(), "Hello".into(), "there".into(), "[SEP]".into() ], vec![None, None, None, None], vec![(0, 0), (0, 5), (6, 11), (0, 0)], vec![1, 0, 0, 1], vec![1, 1, 1, 1], vec![Encoding::new( vec![1, 13, 0], vec![0, 0, 0], vec!["[CLS]".into(), "you".into(), "[SEP]".into()], vec![None, None, None], vec![(0, 0), (12, 15), (0, 0)], vec![1, 0, 1], vec![1, 1, 1], vec![], HashMap::from_iter(vec![(0, 1..2)]), )], HashMap::from_iter(vec![(0, 1..3)]), ) ); assert_eq!(single_encoding.token_to_sequence(2), Some(0)); assert_eq!(single_encoding.token_to_sequence(3), None); let pair_encoding = processor.process(encoding, Some(pair), true).unwrap(); println!("{pair_encoding:#?}"); assert_eq!( pair_encoding, Encoding::new( vec![1, 12, 14, 0, 15, 16, 0], vec![0, 0, 0, 0, 1, 1, 1], vec![ "[CLS]".into(), "Hello".into(), "there".into(), "[SEP]".into(), "pair".into(), "with".into(), "[SEP]".into() ], vec![None, None, None, None, None, None, None], vec![(0, 0), (0, 5), (6, 11), (0, 0), (0, 4), (5, 9), (0, 0)], vec![1, 0, 0, 1, 0, 0, 1], vec![1, 1, 1, 1, 1, 1, 1], vec![ Encoding::new( vec![1, 13, 0, 15, 16, 0], vec![0, 0, 0, 1, 1, 1], vec![ "[CLS]".into(), "you".into(), "[SEP]".into(), "pair".into(), "with".into(), "[SEP]".into() ], vec![None, None, None, None, None, None], vec![(0, 0), (12, 15), (0, 0), (0, 4), (5, 9), (0, 0)], vec![1, 0, 1, 0, 0, 1], vec![1, 1, 1, 1, 1, 1], vec![Encoding::new( vec![1, 13, 0, 17, 0], vec![0, 0, 0, 0, 1], vec![ "[CLS]".into(), "you".into(), "[SEP]".into(), "info".into(), "[SEP]".into() ], vec![None, None, None, None, None,], vec![(0, 0), (12, 15), (0, 0), (10, 14), (0, 0)], vec![1, 0, 1, 0, 1], vec![1, 1, 1, 1, 1], vec![], HashMap::from_iter(vec![(0, 1..2), (1, 3..4)]), ),], HashMap::from_iter(vec![(1, 3..5), (0, 1..2)]), ), Encoding::new( vec![1, 13, 0, 17, 0], vec![0, 0, 0, 0, 1], 
vec![ "[CLS]".into(), "you".into(), "[SEP]".into(), "info".into(), "[SEP]".into() ], vec![None, None, None, None, None,], vec![(0, 0), (12, 15), (0, 0), (10, 14), (0, 0)], vec![1, 0, 1, 0, 1], vec![1, 1, 1, 1, 1], vec![], HashMap::from_iter(vec![(0, 1..2), (1, 3..4)]), ), Encoding::new( vec![1, 12, 14, 0, 17, 0], vec![0, 0, 0, 0, 0, 1], vec![ "[CLS]".into(), "Hello".into(), "there".into(), "[SEP]".into(), "info".into(), "[SEP]".into() ], vec![None, None, None, None, None, None], vec![(0, 0), (0, 5), (6, 11), (0, 0), (10, 14), (0, 0)], vec![1, 0, 0, 1, 0, 1], vec![1, 1, 1, 1, 1, 1], vec![Encoding::new( vec![1, 13, 0, 17, 0], vec![0, 0, 0, 0, 1], vec![ "[CLS]".into(), "you".into(), "[SEP]".into(), "info".into(), "[SEP]".into() ], vec![None, None, None, None, None,], vec![(0, 0), (12, 15), (0, 0), (10, 14), (0, 0)], vec![1, 0, 1, 0, 1], vec![1, 1, 1, 1, 1], vec![], HashMap::from_iter(vec![(0, 1..2), (1, 3..4)]), ),], HashMap::from_iter(vec![(0, 1..3), (1, 4..5)]), ) ], HashMap::from_iter(vec![(0, 1..3), (1, 4..6)]), ) ); assert_eq!(pair_encoding.token_to_sequence(2), Some(0)); assert_eq!(pair_encoding.token_to_sequence(3), None); assert_eq!(pair_encoding.token_to_sequence(4), Some(1)); assert_eq!(pair_encoding.token_to_sequence(5), Some(1)); assert_eq!(pair_encoding.token_to_sequence(6), None); } #[test] fn pair_must_use_both_sequences() { let processor = TemplateProcessing::builder() .try_single("$0") .unwrap() .try_pair("$0 $1") .unwrap() .build(); assert_eq!( processor, Err("Template for `pair` must use both sequences".into()) ); } #[test] fn expect_wrong_error_message() { let processor = TemplateProcessing::builder() .try_single("$0") .unwrap() .try_pair("$0 $1") .unwrap() .build(); assert_ne!( processor, Err("Expect the left side error message to be different from the right side!".into()) ); } }
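Building on the doc examples above, here is a sketch of a full `process` call; the token id 7 and the special-token ids 101/102 are made up for illustration:

```
use tokenizers::processors::template::TemplateProcessing;
use tokenizers::{Encoding, PostProcessor, Token};

let processor = TemplateProcessing::builder()
    .try_single("[CLS] $A [SEP]")
    .unwrap()
    .try_pair("[CLS] $A [SEP] $B:1 [SEP]:1")
    .unwrap()
    .special_tokens(vec![("[CLS]", 101), ("[SEP]", 102)])
    .build()
    .unwrap();

let encoding = Encoding::from_tokens(vec![Token::new(7, "hello".into(), (0, 5))], 0);
let processed = processor.process(encoding, None, true).unwrap();
// The single template surrounds the sequence with the declared special tokens.
assert_eq!(processed.get_ids(), &[101u32, 7, 102]);
assert_eq!(processed.get_type_ids(), &[0u32, 0, 0]);
```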
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/processors/bert.rs
use crate::tokenizer::{Encoding, PostProcessor, Result}; use serde::{Deserialize, Serialize}; use std::collections::HashMap; use std::iter::FromIterator; #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)] #[serde(tag = "type")] pub struct BertProcessing { sep: (String, u32), cls: (String, u32), } impl Default for BertProcessing { fn default() -> Self { Self { sep: ("[SEP]".into(), 102), cls: ("[CLS]".into(), 101), } } } impl BertProcessing { pub fn new(sep: (String, u32), cls: (String, u32)) -> Self { Self { sep, cls } } } #[derive(thiserror::Error, Debug)] pub enum BertProcessorError { #[error("encodings vector length must be either 1 or 2")] InvalidEncodingsVecLength, } impl PostProcessor for BertProcessing { fn added_tokens(&self, is_pair: bool) -> usize { if is_pair { 3 } else { 2 } } fn process_encodings( &self, mut encodings: Vec<Encoding>, add_special_tokens: bool, ) -> Result<Vec<Encoding>> { if !add_special_tokens { return Ok(encodings); } let encodings: Vec<Encoding> = encodings .iter_mut() .enumerate() .map(|(i, encoding)| { if i == 0 { let ids = [&[self.cls.1], encoding.get_ids(), &[self.sep.1]].concat(); let type_ids = [&[0], encoding.get_type_ids(), &[0]].concat(); let tokens = [ &[self.cls.0.clone()], encoding.get_tokens(), &[self.sep.0.clone()], ] .concat(); let words = [&[None], encoding.get_word_ids(), &[None]].concat(); let offsets = [&[(0, 0)], encoding.get_offsets(), &[(0, 0)]].concat(); let special_tokens = [&[1u32], &vec![0; encoding.get_ids().len()][..], &[1]].concat(); let attention_mask = vec![1; ids.len()]; // For compatibility with `TemplateProcessing`, the sequence_ranges shouldn't contain // the special tokens. let sequence_ranges = HashMap::from_iter(vec![(0, 1..ids.len() - 1)]); Encoding::new( ids, type_ids, tokens, words, offsets, special_tokens, attention_mask, encoding .take_overflowing() .into_iter() .map(|encoding| { let ids = [&[self.cls.1], encoding.get_ids(), &[self.sep.1]].concat(); let type_ids = [&[0], encoding.get_type_ids(), &[0]].concat(); let tokens = [ &[self.cls.0.clone()], encoding.get_tokens(), &[self.sep.0.clone()], ] .concat(); let words = [&[None], encoding.get_word_ids(), &[None]].concat(); let offsets = [&[(0, 0)], encoding.get_offsets(), &[(0, 0)]].concat(); let special_tokens = [&[1u32], &vec![0; encoding.get_ids().len()][..], &[1]] .concat(); let attention_mask = vec![1; ids.len()]; // For compatibility with `TemplateProcessing`, the sequence_ranges shouldn't // contain the special tokens. let sequence_ranges = HashMap::from_iter(vec![(0, 1..ids.len() - 1)]); Encoding::new( ids, type_ids, tokens, words, offsets, special_tokens, attention_mask, vec![], sequence_ranges, ) }) .collect(), sequence_ranges, ) } else { let pair_ids = [encoding.get_ids(), &[self.sep.1]].concat(); let pair_type_ids = [encoding.get_type_ids(), &[1]].concat(); let pair_tokens = [encoding.get_tokens(), &[self.sep.0.clone()]].concat(); let pair_words = [encoding.get_word_ids(), &[None]].concat(); let pair_offsets = [encoding.get_offsets(), &[(0, 0)]].concat(); let pair_special_tokens = [&vec![0u32; encoding.get_type_ids().len()][..], &[1]].concat(); let pair_attention_mask = vec![1; pair_ids.len()]; // For compatibility with `TemplateProcessing`, the sequence_ranges shouldn't contain // the special tokens. 
let pair_sequence_ranges = HashMap::from_iter(vec![(1, 0..pair_ids.len() - 1)]); Encoding::new( pair_ids, pair_type_ids, pair_tokens, pair_words, pair_offsets, pair_special_tokens, pair_attention_mask, encoding .take_overflowing() .into_iter() .map(|encoding| { let pair_ids = [encoding.get_ids(), &[self.sep.1]].concat(); let pair_type_ids = [encoding.get_type_ids(), &[1]].concat(); let pair_tokens = [encoding.get_tokens(), &[self.sep.0.clone()]].concat(); let pair_words = [encoding.get_word_ids(), &[None]].concat(); let pair_offsets = [encoding.get_offsets(), &[(0, 0)]].concat(); let pair_special_tokens = [&vec![0u32; encoding.get_type_ids().len()][..], &[1]].concat(); let pair_attention_mask = vec![1; pair_ids.len()]; // For compatibility with `TemplateProcessing`, the sequence_ranges // shouldn't contain the special tokens. let pair_sequence_ranges = HashMap::from_iter(vec![(1, 0..pair_ids.len() - 1)]); Encoding::new( pair_ids, pair_type_ids, pair_tokens, pair_words, pair_offsets, pair_special_tokens, pair_attention_mask, vec![], pair_sequence_ranges, ) }) .collect(), pair_sequence_ranges, ) } }) .collect(); Ok(encodings) } } #[cfg(test)] mod tests { use super::*; #[test] fn serde() { let bert = BertProcessing::default(); let bert_r = r#"{"type":"BertProcessing","sep":["[SEP]",102],"cls":["[CLS]",101]}"#; assert_eq!(serde_json::to_string(&bert).unwrap(), bert_r); assert_eq!( serde_json::from_str::<BertProcessing>(bert_r).unwrap(), bert ); } #[test] fn bert_processing() { let processor = BertProcessing::default(); assert_eq!(processor.added_tokens(false), 2); assert_eq!(processor.added_tokens(true), 3); use crate::Token; let encoding = Encoding::from_tokens( vec![ Token::new(12, "Hello".into(), (0, 5)), Token::new(14, "there".into(), (6, 11)), ], 0, ); let pair = Encoding::from_tokens(vec![Token::new(15, "pair".into(), (0, 4))], 0); let single_encoding = processor.process(encoding.clone(), None, true).unwrap(); assert_eq!( single_encoding, Encoding::new( vec![101, 12, 14, 102], vec![0, 0, 0, 0], vec![ "[CLS]".into(), "Hello".into(), "there".into(), "[SEP]".into() ], vec![None, None, None, None], vec![(0, 0), (0, 5), (6, 11), (0, 0)], vec![1, 0, 0, 1], vec![1, 1, 1, 1], vec![], HashMap::from_iter(vec![(0, 1..3)]), ) ); assert_eq!(single_encoding.token_to_sequence(2), Some(0)); assert_eq!(single_encoding.token_to_sequence(3), None); let pair_encoding = processor .process(encoding.clone(), Some(pair.clone()), true) .unwrap(); assert_eq!( pair_encoding, Encoding::new( vec![101, 12, 14, 102, 15, 102], vec![0, 0, 0, 0, 1, 1], vec![ "[CLS]".into(), "Hello".into(), "there".into(), "[SEP]".into(), "pair".into(), "[SEP]".into() ], vec![None, None, None, None, None, None], vec![(0, 0), (0, 5), (6, 11), (0, 0), (0, 4), (0, 0)], vec![1, 0, 0, 1, 0, 1], vec![1, 1, 1, 1, 1, 1], vec![], HashMap::from_iter(vec![(0, 1..3), (1, 4..5)]), ) ); assert_eq!(pair_encoding.token_to_sequence(2), Some(0)); assert_eq!(pair_encoding.token_to_sequence(3), None); assert_eq!(pair_encoding.token_to_sequence(4), Some(1)); assert_eq!(pair_encoding.token_to_sequence(5), None); // No special tokens let pair_encoding = processor.process(encoding, Some(pair), false).unwrap(); assert_eq!( pair_encoding, Encoding::new( vec![12, 14, 15], vec![0, 0, 1], vec!["Hello".into(), "there".into(), "pair".into(),], vec![None, None, None], vec![(0, 5), (6, 11), (0, 4)], vec![0, 0, 0], vec![1, 1, 1], vec![], HashMap::from_iter(vec![(0, 0..2), (1, 2..3)]), ) ); assert_eq!(pair_encoding.token_to_sequence(0), Some(0)); 
assert_eq!(pair_encoding.token_to_sequence(1), Some(0)); assert_eq!(pair_encoding.token_to_sequence(2), Some(1)); } }
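A short sketch of `BertProcessing` on a made-up pair of single-token encodings, assuming the crate-root re-exports used in the tests above:

```
use tokenizers::processors::bert::BertProcessing;
use tokenizers::{Encoding, PostProcessor, Token};

let processor = BertProcessing::default();
let encoding = Encoding::from_tokens(vec![Token::new(7, "hello".into(), (0, 5))], 0);
let pair = Encoding::from_tokens(vec![Token::new(8, "world".into(), (0, 5))], 0);
let processed = processor.process(encoding, Some(pair), true).unwrap();
// [CLS] hello [SEP] world [SEP], with type_id 1 on the pair side.
assert_eq!(processed.get_ids(), &[101u32, 7, 102, 8, 102]);
assert_eq!(processed.get_type_ids(), &[0u32, 0, 0, 1, 1]);
```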
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/digits.rs
use serde::{Deserialize, Serialize}; use crate::tokenizer::{PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior}; use crate::utils::macro_rules_attribute; #[derive(Clone, Debug, PartialEq, Eq)] /// Pre tokenizes the numbers into single tokens. If individual_digits is set /// to true, then all digits are splitted into individual tokens. #[non_exhaustive] #[macro_rules_attribute(impl_serde_type!)] pub struct Digits { pub individual_digits: bool, } impl Digits { pub fn new(individual_digits: bool) -> Self { Self { individual_digits } } } impl Default for Digits { fn default() -> Self { Self::new(false) } } impl PreTokenizer for Digits { fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> { if self.individual_digits { pretokenized.split(|_, normalized| { normalized.split(char::is_numeric, SplitDelimiterBehavior::Isolated) }) } else { pretokenized.split(|_, normalized| { normalized.split(char::is_numeric, SplitDelimiterBehavior::Contiguous) }) } } } #[cfg(test)] mod tests { use super::*; use crate::{OffsetReferential, OffsetType}; #[test] fn numbers() { let pretok = Digits::new(false); let mut pretokenized = PreTokenizedString::from("Hey 123 friend!"); pretok.pre_tokenize(&mut pretokenized).unwrap(); assert_eq!( pretokenized .get_splits(OffsetReferential::Normalized, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), vec![("Hey ", (0, 4)), ("123", (4, 7)), (" friend!", (7, 15))] ); assert_eq!( pretokenized .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), vec![("Hey ", (0, 4)), ("123", (4, 7)), (" friend!", (7, 15))] ); } #[test] fn individual_digits() { let pretok = Digits::new(true); let mut pretokenized = PreTokenizedString::from("Hey 123 friend!"); pretok.pre_tokenize(&mut pretokenized).unwrap(); assert_eq!( pretokenized .get_splits(OffsetReferential::Normalized, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), vec![ ("Hey ", (0, 4)), ("1", (4, 5)), ("2", (5, 6)), ("3", (6, 7)), (" friend!", (7, 15)) ] ); assert_eq!( pretokenized .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), vec![ ("Hey ", (0, 4)), ("1", (4, 5)), ("2", (5, 6)), ("3", (6, 7)), (" friend!", (7, 15)) ] ); } }
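A sketch of `Digits` with `individual_digits = true` on a string that mixes letters and digits; the input is made up and the crate-root re-exports are assumed:

```
use tokenizers::pre_tokenizers::digits::Digits;
use tokenizers::{OffsetReferential, OffsetType, PreTokenizedString, PreTokenizer};

let pretok = Digits::new(true);
let mut pretokenized = PreTokenizedString::from("R2D2");
pretok.pre_tokenize(&mut pretokenized).unwrap();
let splits: Vec<&str> = pretokenized
    .get_splits(OffsetReferential::Original, OffsetType::Byte)
    .into_iter()
    .map(|(s, _, _)| s)
    .collect();
// Every digit becomes its own isolated token.
assert_eq!(splits, vec!["R", "2", "D", "2"]);
```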
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/sequence.rs
use crate::pre_tokenizers::PreTokenizerWrapper; use crate::tokenizer::{PreTokenizedString, PreTokenizer, Result}; use crate::utils::macro_rules_attribute; use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, PartialEq)] #[macro_rules_attribute(impl_serde_type!)] pub struct Sequence { pretokenizers: Vec<PreTokenizerWrapper>, } impl Sequence { pub fn new(pretokenizers: Vec<PreTokenizerWrapper>) -> Self { Self { pretokenizers } } } impl PreTokenizer for Sequence { fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> { for pretokenizer in &self.pretokenizers { pretokenizer.pre_tokenize(pretokenized)?; } Ok(()) } } #[cfg(test)] mod tests { use super::*; use crate::pre_tokenizers::{punctuation::Punctuation, whitespace::WhitespaceSplit}; use crate::{OffsetReferential, OffsetType}; #[test] fn sequence_basic() { let pretokenizers = vec![ PreTokenizerWrapper::WhitespaceSplit(WhitespaceSplit), PreTokenizerWrapper::Punctuation(Punctuation::default()), ]; let pretok = Sequence::new(pretokenizers); let mut pretokenized: PreTokenizedString = "Hey friend! How are you?!?".into(); pretok.pre_tokenize(&mut pretokenized).unwrap(); assert_eq!( pretokenized .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), vec![ ("Hey", (0, 3)), ("friend", (4, 10)), ("!", (10, 11)), ("How", (16, 19)), ("are", (20, 23)), ("you", (24, 27)), ("?", (27, 28)), ("!", (28, 29)), ("?", (29, 30)), ] ); } }
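A sketch chaining two pre-tokenizers through `Sequence`: whitespace splitting first, then digit grouping. The concrete combination and input are only an example:

```
use tokenizers::pre_tokenizers::sequence::Sequence;
use tokenizers::pre_tokenizers::{digits::Digits, whitespace::WhitespaceSplit, PreTokenizerWrapper};
use tokenizers::{OffsetReferential, OffsetType, PreTokenizedString, PreTokenizer};

let pretok = Sequence::new(vec![
    PreTokenizerWrapper::WhitespaceSplit(WhitespaceSplit),
    PreTokenizerWrapper::Digits(Digits::new(false)),
]);
let mut pretokenized = PreTokenizedString::from("call 911 now");
pretok.pre_tokenize(&mut pretokenized).unwrap();
let splits: Vec<&str> = pretokenized
    .get_splits(OffsetReferential::Original, OffsetType::Byte)
    .into_iter()
    .map(|(s, _, _)| s)
    .collect();
// Whitespace splitting happens first; the all-digit chunk stays contiguous.
assert_eq!(splits, vec!["call", "911", "now"]);
```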
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/mod.rs
pub mod bert; pub mod byte_level; pub mod delimiter; pub mod digits; pub mod metaspace; pub mod punctuation; pub mod sequence; pub mod split; pub mod unicode_scripts; pub mod whitespace; use serde::{Deserialize, Serialize}; use crate::pre_tokenizers::bert::BertPreTokenizer; use crate::pre_tokenizers::byte_level::ByteLevel; use crate::pre_tokenizers::delimiter::CharDelimiterSplit; use crate::pre_tokenizers::digits::Digits; use crate::pre_tokenizers::metaspace::Metaspace; use crate::pre_tokenizers::punctuation::Punctuation; use crate::pre_tokenizers::sequence::Sequence; use crate::pre_tokenizers::split::Split; use crate::pre_tokenizers::unicode_scripts::UnicodeScripts; use crate::pre_tokenizers::whitespace::{Whitespace, WhitespaceSplit}; use crate::{PreTokenizedString, PreTokenizer}; #[derive(Deserialize, Serialize, Clone, Debug, PartialEq)] #[serde(untagged)] pub enum PreTokenizerWrapper { BertPreTokenizer(BertPreTokenizer), ByteLevel(ByteLevel), Delimiter(CharDelimiterSplit), Metaspace(Metaspace), Whitespace(Whitespace), Sequence(Sequence), Split(Split), Punctuation(Punctuation), WhitespaceSplit(WhitespaceSplit), Digits(Digits), UnicodeScripts(UnicodeScripts), } impl PreTokenizer for PreTokenizerWrapper { fn pre_tokenize(&self, normalized: &mut PreTokenizedString) -> crate::Result<()> { match self { Self::BertPreTokenizer(bpt) => bpt.pre_tokenize(normalized), Self::ByteLevel(bpt) => bpt.pre_tokenize(normalized), Self::Delimiter(dpt) => dpt.pre_tokenize(normalized), Self::Metaspace(mspt) => mspt.pre_tokenize(normalized), Self::Whitespace(wspt) => wspt.pre_tokenize(normalized), Self::Punctuation(tok) => tok.pre_tokenize(normalized), Self::Sequence(tok) => tok.pre_tokenize(normalized), Self::Split(tok) => tok.pre_tokenize(normalized), Self::WhitespaceSplit(wspt) => wspt.pre_tokenize(normalized), Self::Digits(wspt) => wspt.pre_tokenize(normalized), Self::UnicodeScripts(us) => us.pre_tokenize(normalized), } } } impl_enum_from!(BertPreTokenizer, PreTokenizerWrapper, BertPreTokenizer); impl_enum_from!(ByteLevel, PreTokenizerWrapper, ByteLevel); impl_enum_from!(CharDelimiterSplit, PreTokenizerWrapper, Delimiter); impl_enum_from!(Whitespace, PreTokenizerWrapper, Whitespace); impl_enum_from!(Punctuation, PreTokenizerWrapper, Punctuation); impl_enum_from!(Sequence, PreTokenizerWrapper, Sequence); impl_enum_from!(Split, PreTokenizerWrapper, Split); impl_enum_from!(Metaspace, PreTokenizerWrapper, Metaspace); impl_enum_from!(WhitespaceSplit, PreTokenizerWrapper, WhitespaceSplit); impl_enum_from!(Digits, PreTokenizerWrapper, Digits); impl_enum_from!(UnicodeScripts, PreTokenizerWrapper, UnicodeScripts); #[cfg(test)] mod tests { use super::*; #[test] fn test_deserialize() { let pre_tokenizer: PreTokenizerWrapper = serde_json::from_str(r#"{"type":"Sequence","pretokenizers":[{"type":"WhitespaceSplit"},{"type":"Metaspace","replacement":"▁","str_rep":"▁","add_prefix_space":true}]}"#).unwrap(); assert_eq!( pre_tokenizer, PreTokenizerWrapper::Sequence(Sequence::new(vec![ PreTokenizerWrapper::WhitespaceSplit(WhitespaceSplit {}), PreTokenizerWrapper::Metaspace(Metaspace::new('▁', true)) ])) ); let pre_tokenizer: PreTokenizerWrapper = serde_json::from_str( r#"{"type":"Metaspace","replacement":"▁","add_prefix_space":true}"#, ) .unwrap(); assert_eq!( pre_tokenizer, PreTokenizerWrapper::Metaspace(Metaspace::new('▁', true)) ); let pre_tokenizer: PreTokenizerWrapper = 
serde_json::from_str(r#"{"type":"Sequence","pretokenizers":[{"type":"WhitespaceSplit"},{"type":"Metaspace","replacement":"▁","add_prefix_space":true}]}"#).unwrap(); assert_eq!( pre_tokenizer, PreTokenizerWrapper::Sequence(Sequence::new(vec![ PreTokenizerWrapper::WhitespaceSplit(WhitespaceSplit {}), PreTokenizerWrapper::Metaspace(Metaspace::new('▁', true)) ])) ); } #[test] fn test_deserialize_whitespace_split() { let pre_tokenizer: PreTokenizerWrapper = serde_json::from_str(r#"{"type":"WhitespaceSplit"}"#).unwrap(); assert_eq!( pre_tokenizer, PreTokenizerWrapper::WhitespaceSplit(WhitespaceSplit {}) ); } }
0
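A hedged sketch of how `PreTokenizerWrapper` is typically exercised from outside the crate: a pre-tokenizer is recovered from its JSON configuration and then used through the wrapper. The `{"type":"WhitespaceSplit"}` payload is the same one accepted by `test_deserialize_whitespace_split` above; `serde_json` is assumed as a dependency of the caller.

```rust
use tokenizers::pre_tokenizers::PreTokenizerWrapper;
use tokenizers::{PreTokenizedString, PreTokenizer};

fn main() {
    // The untagged enum lets serde pick the concrete pre-tokenizer from the
    // "type" field of a saved tokenizer configuration.
    let wrapper: PreTokenizerWrapper =
        serde_json::from_str(r#"{"type":"WhitespaceSplit"}"#).unwrap();

    // The wrapper itself implements `PreTokenizer`, so it can be used directly.
    let mut pretokenized = PreTokenizedString::from("Hello world!");
    wrapper.pre_tokenize(&mut pretokenized).unwrap();

    // Round-tripping back to JSON reproduces the tagged representation.
    println!("{}", serde_json::to_string(&wrapper).unwrap());
}
```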
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/delimiter.rs
use serde::{Deserialize, Serialize};

use crate::tokenizer::{PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior};
use crate::utils::macro_rules_attribute;

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[non_exhaustive]
#[macro_rules_attribute(impl_serde_type!)]
pub struct CharDelimiterSplit {
    pub delimiter: char,
}

impl CharDelimiterSplit {
    pub fn new(delimiter: char) -> Self {
        Self { delimiter }
    }
}

impl PreTokenizer for CharDelimiterSplit {
    fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> {
        // TODO: Maybe add the option to specify the behavior
        pretokenized.split(|_, normalized| {
            normalized.split(self.delimiter, SplitDelimiterBehavior::Removed)
        })
    }
}
0
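A small sketch (not from the repo) showing `CharDelimiterSplit` on a tab-separated record; the delimiter itself is dropped because the implementation above hard-codes `SplitDelimiterBehavior::Removed`. The expected output is worked out by hand.

```rust
use tokenizers::pre_tokenizers::delimiter::CharDelimiterSplit;
use tokenizers::{OffsetReferential, OffsetType, PreTokenizedString, PreTokenizer};

fn main() {
    // Split on a single character, here a tab, and drop it from the output.
    let pretok = CharDelimiterSplit::new('\t');
    let mut pretokenized = PreTokenizedString::from("id\tlabel\ttext");
    pretok.pre_tokenize(&mut pretokenized).unwrap();

    let pieces: Vec<_> = pretokenized
        .get_splits(OffsetReferential::Original, OffsetType::Byte)
        .into_iter()
        .map(|(s, o, _)| (s, o))
        .collect();
    // Should print [("id", (0, 2)), ("label", (3, 8)), ("text", (9, 13))]
    println!("{:?}", pieces);
}
```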
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/whitespace.rs
use regex::Regex; use crate::tokenizer::{ pattern::Invert, PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior, }; use crate::utils::macro_rules_attribute; #[derive(Clone, Debug, PartialEq, Eq)] #[macro_rules_attribute(impl_serde_type!)] pub struct Whitespace; impl Default for Whitespace { fn default() -> Self { Self } } impl PreTokenizer for Whitespace { fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> { lazy_static! { static ref RE: Regex = Regex::new(r"\w+|[^\w\s]+").unwrap(); } let re_ref: &Regex = &RE; pretokenized.split(|_, normalized| { normalized.split(Invert(re_ref), SplitDelimiterBehavior::Removed) }) } } #[derive(Copy, Clone, Debug, PartialEq, Eq)] #[macro_rules_attribute(impl_serde_type!)] pub struct WhitespaceSplit; impl PreTokenizer for WhitespaceSplit { fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> { pretokenized.split(|_, normalized| { normalized.split(char::is_whitespace, SplitDelimiterBehavior::Removed) }) } } #[cfg(test)] mod tests { use super::*; use crate::{OffsetReferential, OffsetType, PreTokenizer}; #[test] fn basic() { let tests = vec![ ( "Hey man!", vec![("Hey", (0, 3)), ("man", (4, 7)), ("!", (7, 8))], ), ( "How are you doing?", vec![ ("How", (0, 3)), ("are", (4, 7)), ("you", (8, 11)), ("doing", (12, 17)), ("?", (17, 18)), ], ), ("\n", vec![]), ]; let pretok = Whitespace {}; for (s, res) in tests { let mut pretokenized = PreTokenizedString::from(s); pretok.pre_tokenize(&mut pretokenized).unwrap(); assert_eq!( pretokenized .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), res ); } } #[test] fn whitespace_split() { let tests = vec![ ("Hey man!", vec![("Hey", (0, 3)), ("man!", (4, 8))]), ( "Hey, man, Good?", vec![("Hey,", (0, 4)), ("man,", (5, 9)), ("Good?", (10, 15))], ), ]; let pretok = WhitespaceSplit; for (s, res) in tests { let mut pretokenized = PreTokenizedString::from(s); pretok.pre_tokenize(&mut pretokenized).unwrap(); assert_eq!( pretokenized .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), res ); } } }
0
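A short comparison sketch, assuming the same crate-root re-exports as the tests above: `Whitespace` applies the `\w+|[^\w\s]+` pattern and therefore detaches punctuation, while `WhitespaceSplit` only cuts on whitespace and leaves punctuation attached.

```rust
use tokenizers::pre_tokenizers::whitespace::{Whitespace, WhitespaceSplit};
use tokenizers::{OffsetReferential, OffsetType, PreTokenizedString, PreTokenizer};

// Collect just the split substrings produced by any pre-tokenizer.
fn pieces<P: PreTokenizer>(pretok: &P, input: &str) -> Vec<String> {
    let mut pretokenized = PreTokenizedString::from(input);
    pretok.pre_tokenize(&mut pretokenized).unwrap();
    pretokenized
        .get_splits(OffsetReferential::Original, OffsetType::Byte)
        .into_iter()
        .map(|(s, _, _)| s.to_string())
        .collect()
}

fn main() {
    let input = "Hey, man!";
    // Word/punctuation pattern: ["Hey", ",", "man", "!"]
    println!("{:?}", pieces(&Whitespace::default(), input));
    // Pure whitespace split: ["Hey,", "man!"]
    println!("{:?}", pieces(&WhitespaceSplit, input));
}
```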
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/punctuation.rs
use serde::{Deserialize, Serialize}; use crate::tokenizer::{PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior}; use crate::utils::macro_rules_attribute; use unicode_categories::UnicodeCategories; fn is_punc(x: char) -> bool { char::is_ascii_punctuation(&x) || x.is_punctuation() } #[derive(Copy, Clone, Debug, PartialEq, Eq)] #[macro_rules_attribute(impl_serde_type!)] pub struct Punctuation { #[serde(default = "default_split")] behavior: SplitDelimiterBehavior, } fn default_split() -> SplitDelimiterBehavior { SplitDelimiterBehavior::Isolated } impl Punctuation { pub fn new(behavior: SplitDelimiterBehavior) -> Self { Self { behavior } } } impl Default for Punctuation { fn default() -> Self { Self::new(SplitDelimiterBehavior::Isolated) } } impl PreTokenizer for Punctuation { fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> { pretokenized.split(|_, s| s.split(is_punc, self.behavior)) } } #[cfg(test)] mod tests { use super::*; use crate::{OffsetReferential, OffsetType}; #[test] fn punctuation_basic() { let pretok = Punctuation::default(); let mut pretokenized: PreTokenizedString = "Hey friend! How are you?!?".into(); pretok.pre_tokenize(&mut pretokenized).unwrap(); assert_eq!( pretokenized .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), vec![ ("Hey friend", (0, 10)), ("!", (10, 11)), (" How are you", (11, 27)), ("?", (27, 28)), ("!", (28, 29)), ("?", (29, 30)), ] ); } #[test] fn deserialization() { let punctuation: Punctuation = serde_json::from_str(r#"{"type": "Punctuation"}"#).unwrap(); assert_eq!(punctuation, Punctuation::default()); assert_eq!( punctuation, Punctuation::new(SplitDelimiterBehavior::Isolated) ); } #[test] #[should_panic] fn deserialization_erroneous() { let _punctuation: Punctuation = serde_json::from_str(r#"{"type": "WhitespaceSplit"}"#).unwrap(); } }
0
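A sketch of a non-default configuration (my choice, the repo only tests `Isolated`): `Punctuation::new` accepts any `SplitDelimiterBehavior`, so punctuation can for instance stay glued to the preceding word. It assumes `SplitDelimiterBehavior` is re-exported at the crate root like the other tokenizer items used in these tests.

```rust
use tokenizers::pre_tokenizers::punctuation::Punctuation;
use tokenizers::{
    OffsetReferential, OffsetType, PreTokenizedString, PreTokenizer, SplitDelimiterBehavior,
};

fn main() {
    // Keep each punctuation mark attached to the piece that precedes it.
    let pretok = Punctuation::new(SplitDelimiterBehavior::MergedWithPrevious);
    let mut pretokenized = PreTokenizedString::from("Hey friend! How are you?");
    pretok.pre_tokenize(&mut pretokenized).unwrap();

    let pieces: Vec<_> = pretokenized
        .get_splits(OffsetReferential::Original, OffsetType::Byte)
        .into_iter()
        .map(|(s, _, _)| s)
        .collect();
    // Should print ["Hey friend!", " How are you?"]
    println!("{:?}", pieces);
}
```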
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/byte_level.rs
use std::collections::{HashMap, HashSet}; use crate::utils::SysRegex; use serde::{Deserialize, Serialize}; use crate::tokenizer::{ Decoder, Encoding, PostProcessor, PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior, }; use crate::utils::macro_rules_attribute; fn bytes_char() -> HashMap<u8, char> { let mut bs: Vec<u8> = vec![]; bs.extend(b'!'..=b'~'); bs.extend(b'\xA1'..=b'\xAC'); bs.extend(b'\xAE'..=b'\xFF'); let mut cs: Vec<u32> = bs.iter().map(|i| *i as u32).collect(); let mut n = 0; for b in 0..=255u8 { if !bs.contains(&b) { bs.push(b); cs.push(u32::pow(2, 8) + n); n += 1; } } bs.into_iter() .zip(cs) .map(|(f, t)| (f, unsafe { std::char::from_u32_unchecked(t) })) .collect() } lazy_static! { static ref RE: SysRegex = SysRegex::new( r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) .unwrap(); static ref BYTES_CHAR: HashMap<u8, char> = bytes_char(); static ref CHAR_BYTES: HashMap<char, u8> = bytes_char().into_iter().map(|(c, b)| (b, c)).collect(); } #[derive(Copy, Clone, Debug, PartialEq, Eq)] /// Provides all the necessary steps to handle the BPE tokenization at the byte-level. Takes care /// of all the required processing steps to transform a UTF-8 string as needed before and after the /// BPE model does its job. #[macro_rules_attribute(impl_serde_type!)] #[non_exhaustive] pub struct ByteLevel { /// Whether to add a leading space to the first word. This allows to treat the leading word /// just as any other word. pub add_prefix_space: bool, /// Whether the post processing step should trim offsets to avoid including whitespaces. pub trim_offsets: bool, /// Whether to use the standard GPT2 regex for whitespace splitting /// Set it to False if you want to use your own splitting. #[serde(default = "default_true")] pub use_regex: bool, } fn default_true() -> bool { true } impl Default for ByteLevel { fn default() -> Self { Self { add_prefix_space: true, trim_offsets: true, use_regex: true, } } } impl ByteLevel { pub fn new(add_prefix_space: bool, trim_offsets: bool, use_regex: bool) -> Self { Self { add_prefix_space, trim_offsets, use_regex, } } pub fn alphabet() -> HashSet<char> { BYTES_CHAR.values().copied().collect() } #[must_use] pub fn add_prefix_space(mut self, v: bool) -> Self { self.add_prefix_space = v; self } #[must_use] pub fn trim_offsets(mut self, v: bool) -> Self { self.trim_offsets = v; self } #[must_use] pub fn use_regex(mut self, v: bool) -> Self { self.use_regex = v; self } } /// As a `PreTokenizer`, `ByteLevel` is in charge of transforming all the unicode characters into /// their byte-level counterpart. It also splits the input according to the configured regex. 
// TODO: Give the ability to modify this regex impl PreTokenizer for ByteLevel { fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> { let re_ref: &SysRegex = &RE; pretokenized.split(|_, mut normalized| { if self.add_prefix_space && !normalized.get().starts_with(' ') { normalized.prepend(" "); } if self.use_regex { normalized.split(re_ref, SplitDelimiterBehavior::Isolated) } else { Ok(vec![normalized]) } })?; pretokenized.normalize(|normalized| { let s = normalized.get(); let mut transformations: Vec<(char, isize)> = Vec::with_capacity(s.len()); let mut i = 0; for cur_char in s.chars() { let size = cur_char.len_utf8(); let bytes = s[i..i + size].as_bytes(); i += size; transformations.extend( bytes .iter() .enumerate() .map(|(i, b)| (BYTES_CHAR[b], isize::from(i > 0))), ); } normalized.transform(transformations.into_iter(), 0); Ok(()) }) } } /// As a `Decoder`, `ByteLevel` is in charge of converting any byte-level characters to their /// unicode counterpart, before merging everything back into a single String. /// This decoder will consume the tokens and merge them in one step to alleviate /// the fact that single token decoded might be a byte not representable as /// as String. impl Decoder for ByteLevel { fn decode_chain(&self, tokens: Vec<String>) -> Result<Vec<String>> { let toks = tokens .into_iter() .flat_map(|t| { t.chars() .try_fold(vec![], |mut acc, c| { CHAR_BYTES.get(&c).map(|b| { acc.push(*b); acc }) }) .unwrap_or_else(|| t.as_bytes().to_vec()) }) .collect::<Vec<u8>>(); Ok(vec![String::from_utf8_lossy(&toks).to_string()]) } } /// As a `PostProcessor`, `ByteLevel` is in charge of trimming the offsets if necessary. impl PostProcessor for ByteLevel { fn added_tokens(&self, _is_pair: bool) -> usize { 0 } fn process_encodings( &self, mut encodings: Vec<Encoding>, _add_special_tokens: bool, ) -> Result<Vec<Encoding>> { if self.trim_offsets { for encoding in encodings.iter_mut() { process_offsets(encoding, self.add_prefix_space); encoding .get_overflowing_mut() .iter_mut() .for_each(|encoding| process_offsets(encoding, self.add_prefix_space)); } } for (i, encoding) in encodings.iter_mut().enumerate() { encoding.set_sequence_id(i); } Ok(encodings) //<dyn PostProcessor>::default_process(encodings, add_special_tokens) } } pub fn process_offsets(encoding: &mut Encoding, add_prefix_space: bool) { encoding.process_tokens_with_offsets_mut(|(i, (token, offsets))| { let mut leading_spaces = token .chars() .take_while(|c| *c == BYTES_CHAR[&b' '] || c.is_whitespace()) .count(); let trailing_spaces = token .chars() .rev() .take_while(|c| *c == BYTES_CHAR[&b' '] || c.is_whitespace()) .count(); if leading_spaces > 0 || trailing_spaces > 0 { if leading_spaces > 0 { // If user uses `is_pretokenized=True` we might have // offsets that might begin at the start of the string but are // NOT the first token. let is_first = i == 0 || offsets.0 == 0; if is_first && add_prefix_space && leading_spaces == 1 { // If we are processing the first pair of offsets, with `add_prefix_space`, // then we shouldn't remove anything we added. If there are more than one // leading spaces though, it means we didn't add them, and they should be // removed. 
leading_spaces = 0; } offsets.0 = std::cmp::min(offsets.0 + leading_spaces, offsets.1); } if trailing_spaces > 0 && offsets.1 >= trailing_spaces { offsets.1 = std::cmp::max(offsets.1 - trailing_spaces, offsets.0); } } }); } #[cfg(test)] mod tests { use super::*; use crate::tokenizer::{ Decoder, Encoding, OffsetReferential, OffsetType, PostProcessor, PreTokenizedString, PreTokenizer, }; use std::iter::FromIterator; #[test] fn pre_tokenization() { let bytelevel = ByteLevel::default().add_prefix_space(false); let mut pretokenized: PreTokenizedString = "Hello my friend, how is your day going?".into(); bytelevel.pre_tokenize(&mut pretokenized).unwrap(); assert_eq!( pretokenized .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), vec![ ("Hello", (0, 5)), ("Ġmy", (5, 8)), ("Ġfriend", (8, 15)), (",", (15, 16)), ("Ġhow", (16, 20)), ("Ġis", (20, 23)), ("Ġyour", (23, 28)), ("Ġday", (28, 32)), ("Ġgoing", (32, 38)), ("?", (38, 39)) ] ); } #[test] fn pre_tokenization_no_regex() { let bytelevel = ByteLevel::default().use_regex(false); let mut pretokenized: PreTokenizedString = "Hello my friend, how is your day going?".into(); bytelevel.pre_tokenize(&mut pretokenized).unwrap(); assert_eq!( pretokenized .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), vec![("ĠHelloĠmyĠfriend,ĠhowĠisĠyourĠdayĠgoing?", (0, 39))] ); } #[test] fn decoding() { let bytelevel = ByteLevel::default().add_prefix_space(false); assert_eq!( bytelevel .decode_chain( vec![ "Hello", "Ġmy", "Ġfriend", ",", "Ġhow", "Ġis", "Ġyour", "Ġday", "Ġgoing", "?" ] .into_iter() .map(|s| s.into()) .collect::<Vec<String>>() ) .unwrap(), vec!["Hello my friend, how is your day going?"] ); } #[test] fn add_prefix_space() { let bytelevel = ByteLevel::default().add_prefix_space(true); for s in &[ " Hello my friend, how is your day going?", "Hello my friend, how is your day going?", ] { let mut pretokenized = PreTokenizedString::from(*s); bytelevel.pre_tokenize(&mut pretokenized).unwrap(); assert_eq!( pretokenized .get_splits(OffsetReferential::Normalized, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), vec![ ("ĠHello", (0, 7)), ("Ġmy", (7, 11)), ("Ġfriend", (11, 19)), (",", (19, 20)), ("Ġhow", (20, 25)), ("Ġis", (25, 29)), ("Ġyour", (29, 35)), ("Ġday", (35, 40)), ("Ġgoing", (40, 47)), ("?", (47, 48)) ] ); } } #[test] fn decode_works_on_separated_tokens() { let samples = vec![ "A Nuskhuri abbreviation of იესუ ქრისტე ( iesu kriste ) \" Jesus Christ \"", "An equal number have descenders , like p or q in English \ : გ , დ , ე , ვ , კ , ლ , ჟ , ტ , უ , ფ , ღ , ყ , ც", ]; let bytelevel = ByteLevel::default().add_prefix_space(false); for sample in samples { let mut pretokenized = PreTokenizedString::from(sample); bytelevel.pre_tokenize(&mut pretokenized).unwrap(); let separated_tokens = pretokenized .get_splits(OffsetReferential::Original, OffsetType::Byte) .iter() .flat_map(|(s, _, _)| s.split("").map(|t| t.into())) .collect::<Vec<_>>(); assert_eq!( sample, bytelevel.decode_chain(separated_tokens).unwrap().join("") ); } } #[test] fn handling_of_newlines() { let mut pretokenized = PreTokenizedString::from("Hello there\nHello there"); let bytelevel = ByteLevel::default().add_prefix_space(false); bytelevel.pre_tokenize(&mut pretokenized).unwrap(); assert_eq!( pretokenized .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), vec![ 
("Hello", (0, 5)), ("Ġthere", (5, 11)), ("Ċ", (11, 12)), ("Hello", (12, 17)), ("Ġthere", (17, 23)) ] ); } #[test] fn handling_of_multiple_whitespaces() { let mut pretokenized = PreTokenizedString::from("Hello there dear"); let bytelevel = ByteLevel::default().add_prefix_space(false); bytelevel.pre_tokenize(&mut pretokenized).unwrap(); assert_eq!( pretokenized .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), vec![ ("Hello", (0, 5)), ("Ġthere", (5, 11)), ("ĠĠĠĠĠĠ", (11, 17)), ("Ġdear", (17, 22)) ] ); } #[test] fn offsets_when_char_split_up() { let input = "i⭢j"; let mut pretokenized = PreTokenizedString::from(input); let bytelevel = ByteLevel::default().add_prefix_space(false); bytelevel.pre_tokenize(&mut pretokenized).unwrap(); assert_eq!( pretokenized .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), vec![("i", (0, 1)), ("âŃ¢", (1, 4)), ("j", (4, 5))] ); assert_eq!( pretokenized .get_splits(OffsetReferential::Normalized, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), vec![("i", (0, 1)), ("âŃ¢", (1, 7)), ("j", (7, 8))] ); assert_eq!( pretokenized .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(_, o, _)| &input[o.0..o.1]) .collect::<Vec<_>>(), vec!["i", "⭢", "j"] ); } #[test] fn processor_trims_offsets_pre_tokenized() { // If user uses `is_pretokenized=True` we might have // offsets that might begin at the start of the string but are // NOT the first token. let mut encoding = Encoding::new( vec![0; 5], vec![], vec!["Ġl".into(), "ove".into(), "Ġl".into(), "ove".into()], vec![], vec![(0, 1), (1, 4), (0, 1), (1, 4)], vec![], vec![], vec![], HashMap::new(), ); process_offsets(&mut encoding, true); assert_eq!( encoding, Encoding::new( vec![0; 5], vec![], vec!["Ġl".into(), "ove".into(), "Ġl".into(), "ove".into()], vec![], vec![(0, 1), (1, 4), (0, 1), (1, 4)], vec![], vec![], vec![], HashMap::new(), ) ); } #[test] fn processor_trims_offsets() { let start = Encoding::new( vec![0; 5], vec![], vec![ "Ġ".into(), "ĠĠĠĠHelloĠĠ".into(), "ĠĠHello".into(), "HelloĠĠ".into(), "ĠĠĠĠ".into(), ], vec![], vec![(0, 1), (0, 11), (11, 18), (18, 25), (25, 29)], vec![], vec![], vec![], HashMap::new(), ); let expected = Encoding::new( vec![0; 5], vec![0; 5], vec![ "Ġ".into(), "ĠĠĠĠHelloĠĠ".into(), "ĠĠHello".into(), "HelloĠĠ".into(), "ĠĠĠĠ".into(), ], vec![], vec![(0, 0), (4, 9), (13, 18), (18, 23), (29, 29)], vec![], vec![], vec![], HashMap::from_iter(vec![(0, 0..5)]), ); let bytelevel = ByteLevel::default().trim_offsets(true); assert_eq!( expected, bytelevel.process(start.clone(), None, false).unwrap() ); let pair_expected = Encoding::new( vec![0; 10], vec![0, 0, 0, 0, 0, 1, 1, 1, 1, 1], vec![ "Ġ".into(), "ĠĠĠĠHelloĠĠ".into(), "ĠĠHello".into(), "HelloĠĠ".into(), "ĠĠĠĠ".into(), "Ġ".into(), "ĠĠĠĠHelloĠĠ".into(), "ĠĠHello".into(), "HelloĠĠ".into(), "ĠĠĠĠ".into(), ], vec![], vec![ (0, 0), (4, 9), (13, 18), (18, 23), (29, 29), (0, 0), (4, 9), (13, 18), (18, 23), (29, 29), ], vec![], vec![], vec![], HashMap::from_iter(vec![(0, 0..5), (1, 5..10)]), ); assert_eq!( pair_expected, bytelevel .process(start.clone(), Some(start), false) .unwrap() ); } #[test] fn decode_unknown_characters() { let byte_level = ByteLevel::default(); assert_eq!( byte_level .decode_chain(vec![ "Hello".into(), "Ġthere".into(), "Ġdear".into(), "Ġfriend!".into(), "Ġ".into(), "[PA D]".into() ]) .unwrap(), vec!["Hello there dear friend! 
[PA D]"] ); } #[test] fn deserialization() { // Before use_regex let byte_level: ByteLevel = serde_json::from_str( r#"{"type": "ByteLevel", "add_prefix_space": true, "trim_offsets": false}"#, ) .unwrap(); assert!(byte_level.use_regex); // Loading works, new future BC test. let byte_level: ByteLevel = serde_json::from_str( r#"{"type": "ByteLevel", "add_prefix_space": true, "trim_offsets": false, "use_regex": true}"#, ) .unwrap(); assert!(byte_level.use_regex); let byte_level: ByteLevel = serde_json::from_str( r#"{"type": "ByteLevel", "add_prefix_space": true, "trim_offsets": false, "use_regex": false}"#, ) .unwrap(); assert!(!byte_level.use_regex); } }
0
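A round-trip sketch for `ByteLevel` as both pre-tokenizer and decoder, mirroring the `pre_tokenization` and `decoding` tests above on a shorter input; it assumes the `Decoder` trait is re-exported at the crate root alongside `PreTokenizer`.

```rust
use tokenizers::pre_tokenizers::byte_level::ByteLevel;
use tokenizers::{Decoder, OffsetReferential, OffsetType, PreTokenizedString, PreTokenizer};

fn main() {
    let byte_level = ByteLevel::default().add_prefix_space(false);

    // Pre-tokenize: the GPT-2 regex splits the text and every byte is remapped
    // to a printable character, so a leading space shows up as "Ġ".
    let mut pretokenized = PreTokenizedString::from("Hello my friend");
    byte_level.pre_tokenize(&mut pretokenized).unwrap();
    let pieces: Vec<String> = pretokenized
        .get_splits(OffsetReferential::Original, OffsetType::Byte)
        .into_iter()
        .map(|(s, _, _)| s.to_string())
        .collect();
    println!("{:?}", pieces); // ["Hello", "Ġmy", "Ġfriend"]

    // Decode: the same struct maps the byte-level alphabet back to raw bytes
    // and merges everything into a single string.
    println!("{:?}", byte_level.decode_chain(pieces).unwrap()); // ["Hello my friend"]
}
```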
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/split.rs
use crate::utils::SysRegex; use serde::{Deserialize, Deserializer, Serialize}; use crate::tokenizer::{ pattern::Invert, PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior, }; /// Represents the different patterns that `Split` can use #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Eq)] pub enum SplitPattern { String(String), Regex(String), } impl From<String> for SplitPattern { fn from(v: String) -> Self { Self::String(v) } } impl From<&str> for SplitPattern { fn from(v: &str) -> Self { Self::String(v.to_owned()) } } #[derive(Debug, Serialize)] #[serde(tag = "type")] pub struct Split { pattern: SplitPattern, #[serde(skip)] regex: SysRegex, behavior: SplitDelimiterBehavior, invert: bool, } impl<'de> Deserialize<'de> for Split { fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error> where D: Deserializer<'de>, { #[derive(Deserialize)] enum Type { Split, } #[derive(Deserialize)] pub struct SplitHelper { #[serde(rename = "type")] _type: Type, pattern: SplitPattern, behavior: SplitDelimiterBehavior, invert: bool, } let helper = SplitHelper::deserialize(deserializer)?; Self::new(helper.pattern, helper.behavior, helper.invert).map_err(serde::de::Error::custom) } } impl Clone for Split { fn clone(&self) -> Self { Self::new(self.pattern.clone(), self.behavior, self.invert).unwrap() } } impl PartialEq for Split { fn eq(&self, other: &Self) -> bool { self.pattern == other.pattern && self.behavior == other.behavior && self.invert == other.invert } } impl Split { pub fn new<I: Into<SplitPattern>>( pattern: I, behavior: SplitDelimiterBehavior, invert: bool, ) -> Result<Self> { let pattern: SplitPattern = pattern.into(); let regex = match &pattern { SplitPattern::String(s) => SysRegex::new(&regex::escape(s))?, SplitPattern::Regex(r) => SysRegex::new(r)?, }; Ok(Self { pattern, regex, behavior, invert, }) } } impl PreTokenizer for Split { fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> { if self.invert { pretokenized.split(|_, normalized| normalized.split(Invert(&self.regex), self.behavior)) } else { pretokenized.split(|_, normalized| normalized.split(&self.regex, self.behavior)) } } } #[cfg(test)] mod tests { use super::*; use crate::{OffsetReferential, OffsetType, PreTokenizer}; use SplitDelimiterBehavior::*; #[test] fn basic() { let tests = vec![ ( Removed, "How are you doing?", vec![ ("How", (0, 3)), ("are", (4, 7)), ("you", (8, 11)), ("doing", (12, 17)), ("?", (17, 18)), ], ), ( Isolated, "How are you doing?", vec![ ("How", (0, 3)), (" ", (3, 4)), ("are", (4, 7)), (" ", (7, 8)), ("you", (8, 11)), (" ", (11, 12)), ("doing", (12, 17)), ("?", (17, 18)), ], ), ( MergedWithPrevious, "How are you doing?", vec![ ("How ", (0, 4)), ("are ", (4, 8)), ("you ", (8, 12)), ("doing", (12, 17)), ("?", (17, 18)), ], ), ( MergedWithNext, "How are you doing?", vec![ ("How", (0, 3)), (" are", (3, 7)), (" you", (7, 11)), (" doing", (11, 17)), ("?", (17, 18)), ], ), ( Contiguous, "How are you doing?", vec![ ("How", (0, 3)), (" ", (3, 4)), ("are", (4, 7)), (" ", (7, 8)), ("you", (8, 11)), (" ", (11, 12)), ("doing?", (12, 18)), ], ), ]; // use whitespace regex let regex = SplitPattern::Regex(r"\w+|[^\w\s]+".into()); for (behavior, s, res) in tests { let mut pretokenized = PreTokenizedString::from(s); let pretok = Split::new(regex.clone(), behavior, true).unwrap(); pretok.pre_tokenize(&mut pretokenized).unwrap(); assert_eq!( pretokenized .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), 
res ); } } #[test] fn regex_string() { let mut pretok_str_for_regex = PreTokenizedString::from("Hey, man!"); let mut pretok_str_for_string = pretok_str_for_regex.clone(); // pre-tokenizer splits on " " - one from Regex, one from string let pretokenizer_regex = Split::new( SplitPattern::Regex(r"\s+".into()), SplitDelimiterBehavior::Removed, false, ) .unwrap(); let pretokenizer_string = Split::new(" ", SplitDelimiterBehavior::Removed, false).unwrap(); pretokenizer_regex .pre_tokenize(&mut pretok_str_for_regex) .unwrap(); pretokenizer_string .pre_tokenize(&mut pretok_str_for_string) .unwrap(); assert_eq!(pretok_str_for_regex, pretok_str_for_string); } #[test] fn invert() { let mut pretok_str = PreTokenizedString::from("Hello Hello Hello"); let mut pretok_str_for_invert = pretok_str.clone(); // one pre-tokenizer splits on " " - one splits inverted on "Hello" let pretokenizer = Split::new(" ", SplitDelimiterBehavior::Removed, false).unwrap(); let pretokenizer_invert = Split::new("Hello", SplitDelimiterBehavior::Removed, true).unwrap(); pretokenizer.pre_tokenize(&mut pretok_str).unwrap(); pretokenizer_invert .pre_tokenize(&mut pretok_str_for_invert) .unwrap(); assert_eq!(pretok_str, pretok_str_for_invert); } #[test] fn serialization() { use SplitDelimiterBehavior::*; let split = Split::new("Hello", Removed, true).unwrap(); let split_s = r#"{"type":"Split","pattern":{"String":"Hello"},"behavior":"Removed","invert":true}"#; assert_eq!(serde_json::to_string(&split).unwrap(), split_s); assert_eq!(serde_json::from_str::<Split>(split_s).unwrap(), split); let split = Split::new(SplitPattern::Regex(r"\s+".into()), Isolated, false).unwrap(); let split_s = r#"{"type":"Split","pattern":{"Regex":"\\s+"},"behavior":"Isolated","invert":false}"#; assert_eq!(serde_json::to_string(&split).unwrap(), split_s); assert_eq!(serde_json::from_str::<Split>(split_s).unwrap(), split); } }
0
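A sketch of `Split` with `invert`, close to the `basic` test above but using a plain `\w+` pattern (my choice, not from the repo): with `invert` set, the regex matches themselves become the kept pieces and everything between them is treated as the delimiter.

```rust
use tokenizers::pre_tokenizers::split::{Split, SplitPattern};
use tokenizers::{
    OffsetReferential, OffsetType, PreTokenizedString, PreTokenizer, SplitDelimiterBehavior,
};

fn main() {
    // Keep the \w+ matches, remove whatever separates them (spaces, "?", ...).
    let pretok = Split::new(
        SplitPattern::Regex(r"\w+".into()),
        SplitDelimiterBehavior::Removed,
        true, // invert
    )
    .unwrap();

    let mut pretokenized = PreTokenizedString::from("How are you doing?");
    pretok.pre_tokenize(&mut pretokenized).unwrap();
    let pieces: Vec<_> = pretokenized
        .get_splits(OffsetReferential::Original, OffsetType::Byte)
        .into_iter()
        .map(|(s, _, _)| s)
        .collect();
    // Should print ["How", "are", "you", "doing"]
    println!("{:?}", pieces);
}
```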
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/metaspace.rs
use serde::{Deserialize, Deserializer, Serialize}; use crate::tokenizer::{Decoder, PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior}; #[derive(Debug, Clone, PartialEq, Serialize, Eq)] /// Replaces all the whitespaces by the provided meta character and then /// splits on this character #[serde(tag = "type")] pub struct Metaspace { replacement: char, pub add_prefix_space: bool, #[serde(skip)] str_rep: String, } impl<'de> Deserialize<'de> for Metaspace { fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error> where D: Deserializer<'de>, { #[derive(Deserialize)] enum Type { Metaspace, } #[derive(Deserialize)] pub struct MetaspaceHelper { #[serde(rename = "type")] _type: Type, replacement: char, pub add_prefix_space: bool, #[serde(skip, rename = "str_rep")] _str_rep: String, } let helper = MetaspaceHelper::deserialize(deserializer)?; Ok(Self::new(helper.replacement, helper.add_prefix_space)) } } impl Metaspace { pub fn new(replacement: char, add_prefix_space: bool) -> Self { Self { replacement, str_rep: replacement.to_string(), add_prefix_space, } } pub fn get_replacement(&self) -> char { self.replacement } pub fn set_replacement(&mut self, replacement: char) { self.replacement = replacement; self.str_rep = replacement.to_string(); } } impl Default for Metaspace { fn default() -> Self { Self::new('▁', true) } } impl PreTokenizer for Metaspace { fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> { pretokenized.split(|_, mut normalized| { normalized.replace(' ', &self.str_rep)?; if self.add_prefix_space && !normalized.get().starts_with(self.replacement) { normalized.prepend(&self.str_rep); } normalized.split(self.replacement, SplitDelimiterBehavior::MergedWithNext) }) } } impl Decoder for Metaspace { fn decode_chain(&self, tokens: Vec<String>) -> Result<Vec<String>> { Ok(tokens .iter() .enumerate() .map(|(i, token)| { token .chars() .flat_map(|c| { if c == self.replacement { if i == 0 && self.add_prefix_space { None } else { Some(' ') } } else { Some(c) } }) .collect::<String>() }) .collect()) } } #[cfg(test)] mod tests { use super::*; use crate::{OffsetReferential, OffsetType}; #[test] fn serialization() { let metaspace = Metaspace::new('_', true); let metaspace_s = r#"{"type":"Metaspace","replacement":"_","add_prefix_space":true}"#; assert_eq!(serde_json::to_string(&metaspace).unwrap(), metaspace_s); assert_eq!( serde_json::from_str::<Metaspace>(metaspace_s).unwrap(), metaspace ); // Also check it can deserialize previous versions let metaspace = Metaspace::new('_', true); let metaspace_s = r#"{"type":"Metaspace","str_rep":"_","replacement":"_","add_prefix_space":true}"#; assert_eq!( serde_json::from_str::<Metaspace>(metaspace_s).unwrap(), metaspace ); let metaspace_parsed: Metaspace = serde_json::from_str( r#"{"type":"Metaspace","replacement":"_","add_prefix_space":true}"#, ) .unwrap(); assert_eq!(metaspace_parsed, metaspace); } #[test] fn basic() { let pretok = Metaspace::new('▁', true); let mut pretokenized = PreTokenizedString::from("Hey friend!"); pretok.pre_tokenize(&mut pretokenized).unwrap(); assert_eq!( pretokenized .get_splits(OffsetReferential::Normalized, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), vec![("▁Hey", (0, 6)), ("▁friend!", (6, 16))] ); assert_eq!( pretokenized .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), vec![("▁Hey", (0, 3)), ("▁friend!", (3, 11))] ); } #[test] fn multiple_spaces() { let pretok = 
Metaspace::new('▁', true); let mut pretokenized = PreTokenizedString::from("Hey friend!"); pretok.pre_tokenize(&mut pretokenized).unwrap(); assert_eq!( pretokenized .get_splits(OffsetReferential::Normalized, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), vec![ ("▁Hey", (0, 6)), ("▁", (6, 9)), ("▁", (9, 12)), ("▁friend!", (12, 22)), ] ); assert_eq!( pretokenized .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), vec![ ("▁Hey", (0, 3)), ("▁", (3, 4)), ("▁", (4, 5)), ("▁friend!", (5, 13)), ] ); } #[test] fn decode() { let decoder = Metaspace::new('▁', true); let res = decoder .decode_chain(vec!["▁Hey".into(), "▁friend!".into()]) .unwrap(); assert_eq!(res, vec!["Hey", " friend!"]) } }
0
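A sketch combining the `basic` and `decode` tests above into one program: `Metaspace` marks word boundaries with `▁` during pre-tokenization and strips them again when used as a decoder (same `Decoder` crate-root re-export assumption as in the `ByteLevel` sketch).

```rust
use tokenizers::pre_tokenizers::metaspace::Metaspace;
use tokenizers::{Decoder, OffsetReferential, OffsetType, PreTokenizedString, PreTokenizer};

fn main() {
    let metaspace = Metaspace::new('▁', true);

    // Spaces become '▁' and, with add_prefix_space, the first word gets one too.
    let mut pretokenized = PreTokenizedString::from("Hey friend!");
    metaspace.pre_tokenize(&mut pretokenized).unwrap();
    let pieces: Vec<String> = pretokenized
        .get_splits(OffsetReferential::Normalized, OffsetType::Byte)
        .into_iter()
        .map(|(s, _, _)| s.to_string())
        .collect();
    println!("{:?}", pieces); // ["▁Hey", "▁friend!"]

    // Decoding turns the markers back into spaces, dropping the leading one.
    println!("{:?}", metaspace.decode_chain(pieces).unwrap()); // ["Hey", " friend!"]
}
```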
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/bert.rs
use crate::tokenizer::{PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior}; use crate::utils::macro_rules_attribute; use unicode_categories::UnicodeCategories; fn is_bert_punc(x: char) -> bool { char::is_ascii_punctuation(&x) || x.is_punctuation() } #[derive(Copy, Clone, Debug, PartialEq, Eq)] #[macro_rules_attribute(impl_serde_type!)] pub struct BertPreTokenizer; impl PreTokenizer for BertPreTokenizer { fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> { pretokenized.split(|_, s| s.split(char::is_whitespace, SplitDelimiterBehavior::Removed))?; pretokenized.split(|_, s| s.split(is_bert_punc, SplitDelimiterBehavior::Isolated)) } } #[cfg(test)] mod tests { use super::*; use crate::{NormalizedString, OffsetReferential, OffsetType}; #[test] fn basic() { let pretok = BertPreTokenizer; let mut pretokenized: PreTokenizedString = "Hey friend! How are you?!?".into(); pretok.pre_tokenize(&mut pretokenized).unwrap(); assert_eq!( pretokenized .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), vec![ ("Hey", (0, 3)), ("friend", (4, 10)), ("!", (10, 11)), ("How", (16, 19)), ("are", (20, 23)), ("you", (24, 27)), ("?", (27, 28)), ("!", (28, 29)), ("?", (29, 30)), ] ); } #[test] fn chinese_chars() { let mut n = NormalizedString::from("野口里佳 Noguchi Rika"); n.transform( n.get().to_owned().chars().flat_map(|c| { if (c as usize) > 0x4E00 { vec![(' ', 0), (c, 1), (' ', 1)] } else { vec![(c, 0)] } }), 0, ); let mut pretokenized = n.into(); let pretok = BertPreTokenizer; pretok.pre_tokenize(&mut pretokenized).unwrap(); assert_eq!( pretokenized .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), vec![ ("野", (0, 3)), ("口", (3, 6)), ("里", (6, 9)), ("佳", (9, 12)), ("Noguchi", (13, 20)), ("Rika", (21, 25)) ] ); } }
0
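A minimal external-use sketch for `BertPreTokenizer` on a different input than the in-file test; the expected output is derived by hand from the two-step whitespace-then-punctuation scheme shown above.

```rust
use tokenizers::pre_tokenizers::bert::BertPreTokenizer;
use tokenizers::{OffsetReferential, OffsetType, PreTokenizedString, PreTokenizer};

fn main() {
    // Whitespace split first, then punctuation isolated: the classic BERT scheme.
    let pretok = BertPreTokenizer;
    let mut pretokenized = PreTokenizedString::from("Hello, world!");
    pretok.pre_tokenize(&mut pretokenized).unwrap();

    let pieces: Vec<_> = pretokenized
        .get_splits(OffsetReferential::Original, OffsetType::Byte)
        .into_iter()
        .map(|(s, _, _)| s)
        .collect();
    // Should print ["Hello", ",", "world", "!"]
    println!("{:?}", pieces);
}
```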
hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers
hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/unicode_scripts/mod.rs
mod pre_tokenizer;
mod scripts;

// Re-export the PreTokenizer
pub use pre_tokenizer::UnicodeScripts;
0
hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers
hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/unicode_scripts/pre_tokenizer.rs
use crate::pre_tokenizers::unicode_scripts::scripts::{get_script, Script}; use crate::tokenizer::{normalizer::Range, PreTokenizedString, PreTokenizer, Result}; use crate::utils::macro_rules_attribute; #[derive(Clone, Debug, PartialEq, Eq)] #[macro_rules_attribute(impl_serde_type!)] pub struct UnicodeScripts; impl UnicodeScripts { pub fn new() -> Self { Self {} } } impl Default for UnicodeScripts { fn default() -> Self { Self::new() } } // This code exists in the Unigram default IsValidSentencePiece. // It could be integrated directly within `get_script` but I // think it's kind of tricky to see those modifications later // I am guessing release mode will optimize this away anyway. fn fixed_script(c: char) -> Script { let raw_script = get_script(c); if c as u32 == 0x30FC { Script::Han } else if c == ' ' { Script::Any } else { match raw_script { Script::Hiragana => Script::Han, Script::Katakana => Script::Han, script => script, } } } impl PreTokenizer for UnicodeScripts { fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> { pretokenized.split(|_, normalized| { let mut last_script = None; let mut offset = 0; let mut ranges: Vec<_> = normalized .get() .chars() .filter_map(|c| { let script = Some(fixed_script(c)); let result = if script != Some(Script::Any) && last_script != Some(Script::Any) && last_script != script { Some(offset) } else { None }; offset += c.len_utf8(); if script != Some(Script::Any) { last_script = script; } result }) .collect(); ranges.push(normalized.get().len()); Ok(ranges .windows(2) .map(|item| { normalized .slice(Range::Normalized(item[0]..item[1])) .expect("NormalizedString bad split") }) .collect::<Vec<_>>()) }) } } #[cfg(test)] mod tests { use super::*; use crate::OffsetReferential; use crate::OffsetType; #[test] fn basic() { let pretok = UnicodeScripts {}; let mut pretokenized = PreTokenizedString::from("どこで生れ。Yes"); pretok.pre_tokenize(&mut pretokenized).unwrap(); assert_eq!( pretokenized .get_splits(OffsetReferential::Normalized, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), vec![("どこで生れ", (0, 15)), ("。", (15, 18)), ("Yes", (18, 21))] ); assert_eq!( pretokenized .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), vec![("どこで生れ", (0, 15)), ("。", (15, 18)), ("Yes", (18, 21))] ); } #[test] fn spaces_are_included_in_every_script() { let pretok = UnicodeScripts {}; let mut pretokenized = PreTokenizedString::from("Apples are りんご 林檎"); pretok.pre_tokenize(&mut pretokenized).unwrap(); assert_eq!( pretokenized .get_splits(OffsetReferential::Normalized, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), vec![("Apples are ", (0, 11)), ("りんご 林檎", (11, 27))] ); assert_eq!( pretokenized .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), vec![("Apples are ", (0, 11)), ("りんご 林檎", (11, 27))] ); } #[test] fn test_unicode_script() { assert_eq!(Script::Han, fixed_script('京')); assert_eq!(Script::Han, fixed_script('太')); assert_eq!(Script::Han, fixed_script('い')); assert_eq!(Script::Han, fixed_script('グ')); assert_eq!(Script::Han, fixed_script('ー')); assert_eq!(Script::Latin, fixed_script('a')); assert_eq!(Script::Latin, fixed_script('A')); assert_eq!(Script::Common, fixed_script('0')); assert_eq!(Script::Common, fixed_script('$')); assert_eq!(Script::Common, fixed_script('@')); assert_eq!(Script::Common, fixed_script('-')); assert_eq!(Script::Any, fixed_script(' ')); } 
}
0
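A standalone sketch of `UnicodeScripts`, reusing the `basic` test input above: pieces are cut wherever the (patched) Unicode script changes, and spaces never force a boundary on their own.

```rust
use tokenizers::pre_tokenizers::unicode_scripts::UnicodeScripts;
use tokenizers::{OffsetReferential, OffsetType, PreTokenizedString, PreTokenizer};

fn main() {
    let pretok = UnicodeScripts::new();
    let mut pretokenized = PreTokenizedString::from("どこで生れ。Yes");
    pretok.pre_tokenize(&mut pretokenized).unwrap();

    let pieces: Vec<_> = pretokenized
        .get_splits(OffsetReferential::Original, OffsetType::Byte)
        .into_iter()
        .map(|(s, o, _)| (s, o))
        .collect();
    // As in the test: [("どこで生れ", (0, 15)), ("。", (15, 18)), ("Yes", (18, 21))]
    println!("{:?}", pieces);
}
```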
hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers
hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/unicode_scripts/scripts.rs
// Generated by modified Perl script at https://github.com/google/sentencepiece/blob/master/data/gen_unicode_scripts_code.pl // Unicode scripts : https://gist.github.com/Narsil/07556f26dc84a6baeff4d499e68d3cd2 // Rust adaptation : https://gist.github.com/Narsil/1df9fbbf5296a8d4d62de55dcb2fe700 #[derive(PartialEq, Debug, Clone, Copy, Eq)] pub enum Script { Any, Adlam, Ahom, AnatolianHieroglyphs, Arabic, Armenian, Avestan, Balinese, Bamum, BassaVah, Batak, Bengali, Bhaiksuki, Bopomofo, Brahmi, Braille, Buginese, Buhid, CanadianAboriginal, Carian, CaucasianAlbanian, Chakma, Cham, Cherokee, Common, Coptic, Cuneiform, Cypriot, Cyrillic, Deseret, Devanagari, Duployan, EgyptianHieroglyphs, Elbasan, Ethiopic, Georgian, Glagolitic, Gothic, Grantha, Greek, Gujarati, Gurmukhi, Han, Hangul, Hanunoo, Hatran, Hebrew, Hiragana, ImperialAramaic, Inherited, InscriptionalPahlavi, InscriptionalParthian, Javanese, Kaithi, Kannada, Katakana, KayahLi, Kharoshthi, Khmer, Khojki, Khudawadi, Lao, Latin, Lepcha, Limbu, LinearA, LinearB, Lisu, Lycian, Lydian, Mahajani, Malayalam, Mandaic, Manichaean, Marchen, MeeteiMayek, MendeKikakui, MeroiticCursive, MeroiticHieroglyphs, Miao, Modi, Mongolian, Mro, Multani, Myanmar, Nabataean, NewTaiLue, Newa, Nko, Ogham, OlChiki, OldHungarian, OldItalic, OldNorthArabian, OldPermic, OldPersian, OldSouthArabian, OldTurkic, Oriya, Osage, Osmanya, PahawhHmong, Palmyrene, PauCinHau, PhagsPa, Phoenician, PsalterPahlavi, Rejang, Runic, Samaritan, Saurashtra, Sharada, Shavian, Siddham, SignWriting, Sinhala, SoraSompeng, Sundanese, SylotiNagri, Syriac, Tagalog, Tagbanwa, TaiLe, TaiTham, TaiViet, Takri, Tamil, Tangut, Telugu, Thaana, Thai, Tibetan, Tifinagh, Tirhuta, Ugaritic, Vai, WarangCiti, Yi, } pub fn get_script(c: char) -> Script { match c as u32 { 0x0000..=0x001F => Script::Common, 0x0020 => Script::Common, 0x0021..=0x0023 => Script::Common, 0x0024 => Script::Common, 0x0025..=0x0027 => Script::Common, 0x0028 => Script::Common, 0x0029 => Script::Common, 0x002A => Script::Common, 0x002B => Script::Common, 0x002C => Script::Common, 0x002D => Script::Common, 0x002E..=0x002F => Script::Common, 0x0030..=0x0039 => Script::Common, 0x003A..=0x003B => Script::Common, 0x003C..=0x003E => Script::Common, 0x003F..=0x0040 => Script::Common, 0x005B => Script::Common, 0x005C => Script::Common, 0x005D => Script::Common, 0x005E => Script::Common, 0x005F => Script::Common, 0x0060 => Script::Common, 0x007B => Script::Common, 0x007C => Script::Common, 0x007D => Script::Common, 0x007E => Script::Common, 0x007F..=0x009F => Script::Common, 0x00A0 => Script::Common, 0x00A1 => Script::Common, 0x00A2..=0x00A5 => Script::Common, 0x00A6 => Script::Common, 0x00A7 => Script::Common, 0x00A8 => Script::Common, 0x00A9 => Script::Common, 0x00AB => Script::Common, 0x00AC => Script::Common, 0x00AD => Script::Common, 0x00AE => Script::Common, 0x00AF => Script::Common, 0x00B0 => Script::Common, 0x00B1 => Script::Common, 0x00B2..=0x00B3 => Script::Common, 0x00B4 => Script::Common, 0x00B5 => Script::Common, 0x00B6..=0x00B7 => Script::Common, 0x00B8 => Script::Common, 0x00B9 => Script::Common, 0x00BB => Script::Common, 0x00BC..=0x00BE => Script::Common, 0x00BF => Script::Common, 0x00D7 => Script::Common, 0x00F7 => Script::Common, 0x02B9..=0x02C1 => Script::Common, 0x02C2..=0x02C5 => Script::Common, 0x02C6..=0x02D1 => Script::Common, 0x02D2..=0x02DF => Script::Common, 0x02E5..=0x02E9 => Script::Common, 0x02EC => Script::Common, 0x02ED => Script::Common, 0x02EE => Script::Common, 0x02EF..=0x02FF => Script::Common, 0x0374 => 
Script::Common, 0x037E => Script::Common, 0x0385 => Script::Common, 0x0387 => Script::Common, 0x0589 => Script::Common, 0x0605 => Script::Common, 0x060C => Script::Common, 0x061B => Script::Common, 0x061C => Script::Common, 0x061F => Script::Common, 0x0640 => Script::Common, 0x06DD => Script::Common, 0x08E2 => Script::Common, 0x0964..=0x0965 => Script::Common, 0x0E3F => Script::Common, 0x0FD5..=0x0FD8 => Script::Common, 0x10FB => Script::Common, 0x16EB..=0x16ED => Script::Common, 0x1735..=0x1736 => Script::Common, 0x1802..=0x1803 => Script::Common, 0x1805 => Script::Common, 0x1CD3 => Script::Common, 0x1CE1 => Script::Common, 0x1CE9..=0x1CEC => Script::Common, 0x1CEE..=0x1CF1 => Script::Common, 0x1CF2..=0x1CF3 => Script::Common, 0x1CF5..=0x1CF6 => Script::Common, 0x2000..=0x200A => Script::Common, 0x200B => Script::Common, 0x200E..=0x200F => Script::Common, 0x2010..=0x2015 => Script::Common, 0x2016..=0x2017 => Script::Common, 0x2018 => Script::Common, 0x2019 => Script::Common, 0x201A => Script::Common, 0x201B..=0x201C => Script::Common, 0x201D => Script::Common, 0x201E => Script::Common, 0x201F => Script::Common, 0x2020..=0x2027 => Script::Common, 0x2028 => Script::Common, 0x2029 => Script::Common, 0x202A..=0x202E => Script::Common, 0x202F => Script::Common, 0x2030..=0x2038 => Script::Common, 0x2039 => Script::Common, 0x203A => Script::Common, 0x203B..=0x203E => Script::Common, 0x203F..=0x2040 => Script::Common, 0x2041..=0x2043 => Script::Common, 0x2044 => Script::Common, 0x2045 => Script::Common, 0x2046 => Script::Common, 0x2047..=0x2051 => Script::Common, 0x2052 => Script::Common, 0x2053 => Script::Common, 0x2054 => Script::Common, 0x2055..=0x205E => Script::Common, 0x205F => Script::Common, 0x2060..=0x2064 => Script::Common, 0x2066..=0x206F => Script::Common, 0x2070 => Script::Common, 0x2074..=0x2079 => Script::Common, 0x207A..=0x207C => Script::Common, 0x207D => Script::Common, 0x207E => Script::Common, 0x2080..=0x2089 => Script::Common, 0x208A..=0x208C => Script::Common, 0x208D => Script::Common, 0x208E => Script::Common, 0x20A0..=0x20BE => Script::Common, 0x2100..=0x2101 => Script::Common, 0x2102 => Script::Common, 0x2103..=0x2106 => Script::Common, 0x2107 => Script::Common, 0x2108..=0x2109 => Script::Common, 0x210A..=0x2113 => Script::Common, 0x2114 => Script::Common, 0x2115 => Script::Common, 0x2116..=0x2117 => Script::Common, 0x2118 => Script::Common, 0x2119..=0x211D => Script::Common, 0x211E..=0x2123 => Script::Common, 0x2124 => Script::Common, 0x2125 => Script::Common, 0x2127 => Script::Common, 0x2128 => Script::Common, 0x2129 => Script::Common, 0x212C..=0x212D => Script::Common, 0x212E => Script::Common, 0x212F..=0x2131 => Script::Common, 0x2133..=0x2134 => Script::Common, 0x2135..=0x2138 => Script::Common, 0x2139 => Script::Common, 0x213A..=0x213B => Script::Common, 0x213C..=0x213F => Script::Common, 0x2140..=0x2144 => Script::Common, 0x2145..=0x2149 => Script::Common, 0x214A => Script::Common, 0x214B => Script::Common, 0x214C..=0x214D => Script::Common, 0x214F => Script::Common, 0x2150..=0x215F => Script::Common, 0x2189 => Script::Common, 0x218A..=0x218B => Script::Common, 0x2190..=0x2194 => Script::Common, 0x2195..=0x2199 => Script::Common, 0x219A..=0x219B => Script::Common, 0x219C..=0x219F => Script::Common, 0x21A0 => Script::Common, 0x21A1..=0x21A2 => Script::Common, 0x21A3 => Script::Common, 0x21A4..=0x21A5 => Script::Common, 0x21A6 => Script::Common, 0x21A7..=0x21AD => Script::Common, 0x21AE => Script::Common, 0x21AF..=0x21CD => Script::Common, 0x21CE..=0x21CF => 
Script::Common, 0x21D0..=0x21D1 => Script::Common, 0x21D2 => Script::Common, 0x21D3 => Script::Common, 0x21D4 => Script::Common, 0x21D5..=0x21F3 => Script::Common, 0x21F4..=0x22FF => Script::Common, 0x2300..=0x2307 => Script::Common, 0x2308 => Script::Common, 0x2309 => Script::Common, 0x230A => Script::Common, 0x230B => Script::Common, 0x230C..=0x231F => Script::Common, 0x2320..=0x2321 => Script::Common, 0x2322..=0x2328 => Script::Common, 0x2329 => Script::Common, 0x232A => Script::Common, 0x232B..=0x237B => Script::Common, 0x237C => Script::Common, 0x237D..=0x239A => Script::Common, 0x239B..=0x23B3 => Script::Common, 0x23B4..=0x23DB => Script::Common, 0x23DC..=0x23E1 => Script::Common, 0x23E2..=0x23FE => Script::Common, 0x2400..=0x2426 => Script::Common, 0x2440..=0x244A => Script::Common, 0x2460..=0x249B => Script::Common, 0x249C..=0x24E9 => Script::Common, 0x24EA..=0x24FF => Script::Common, 0x2500..=0x25B6 => Script::Common, 0x25B7 => Script::Common, 0x25B8..=0x25C0 => Script::Common, 0x25C1 => Script::Common, 0x25C2..=0x25F7 => Script::Common, 0x25F8..=0x25FF => Script::Common, 0x2600..=0x266E => Script::Common, 0x266F => Script::Common, 0x2670..=0x2767 => Script::Common, 0x2768 => Script::Common, 0x2769 => Script::Common, 0x276A => Script::Common, 0x276B => Script::Common, 0x276C => Script::Common, 0x276D => Script::Common, 0x276E => Script::Common, 0x276F => Script::Common, 0x2770 => Script::Common, 0x2771 => Script::Common, 0x2772 => Script::Common, 0x2773 => Script::Common, 0x2774 => Script::Common, 0x2775 => Script::Common, 0x2776..=0x2793 => Script::Common, 0x2794..=0x27BF => Script::Common, 0x27C0..=0x27C4 => Script::Common, 0x27C5 => Script::Common, 0x27C6 => Script::Common, 0x27C7..=0x27E5 => Script::Common, 0x27E6 => Script::Common, 0x27E7 => Script::Common, 0x27E8 => Script::Common, 0x27E9 => Script::Common, 0x27EA => Script::Common, 0x27EB => Script::Common, 0x27EC => Script::Common, 0x27ED => Script::Common, 0x27EE => Script::Common, 0x27EF => Script::Common, 0x27F0..=0x27FF => Script::Common, 0x2900..=0x2982 => Script::Common, 0x2983 => Script::Common, 0x2984 => Script::Common, 0x2985 => Script::Common, 0x2986 => Script::Common, 0x2987 => Script::Common, 0x2988 => Script::Common, 0x2989 => Script::Common, 0x298A => Script::Common, 0x298B => Script::Common, 0x298C => Script::Common, 0x298D => Script::Common, 0x298E => Script::Common, 0x298F => Script::Common, 0x2990 => Script::Common, 0x2991 => Script::Common, 0x2992 => Script::Common, 0x2993 => Script::Common, 0x2994 => Script::Common, 0x2995 => Script::Common, 0x2996 => Script::Common, 0x2997 => Script::Common, 0x2998 => Script::Common, 0x2999..=0x29D7 => Script::Common, 0x29D8 => Script::Common, 0x29D9 => Script::Common, 0x29DA => Script::Common, 0x29DB => Script::Common, 0x29DC..=0x29FB => Script::Common, 0x29FC => Script::Common, 0x29FD => Script::Common, 0x29FE..=0x2AFF => Script::Common, 0x2B00..=0x2B2F => Script::Common, 0x2B30..=0x2B44 => Script::Common, 0x2B45..=0x2B46 => Script::Common, 0x2B47..=0x2B4C => Script::Common, 0x2B4D..=0x2B73 => Script::Common, 0x2B76..=0x2B95 => Script::Common, 0x2B98..=0x2BB9 => Script::Common, 0x2BBD..=0x2BC8 => Script::Common, 0x2BCA..=0x2BD1 => Script::Common, 0x2BEC..=0x2BEF => Script::Common, 0x2E00..=0x2E01 => Script::Common, 0x2E02 => Script::Common, 0x2E03 => Script::Common, 0x2E04 => Script::Common, 0x2E05 => Script::Common, 0x2E06..=0x2E08 => Script::Common, 0x2E09 => Script::Common, 0x2E0A => Script::Common, 0x2E0B => Script::Common, 0x2E0C => Script::Common, 0x2E0D => 
Script::Common, 0x2E0E..=0x2E16 => Script::Common, 0x2E17 => Script::Common, 0x2E18..=0x2E19 => Script::Common, 0x2E1A => Script::Common, 0x2E1B => Script::Common, 0x2E1C => Script::Common, 0x2E1D => Script::Common, 0x2E1E..=0x2E1F => Script::Common, 0x2E20 => Script::Common, 0x2E21 => Script::Common, 0x2E22 => Script::Common, 0x2E23 => Script::Common, 0x2E24 => Script::Common, 0x2E25 => Script::Common, 0x2E26 => Script::Common, 0x2E27 => Script::Common, 0x2E28 => Script::Common, 0x2E29 => Script::Common, 0x2E2A..=0x2E2E => Script::Common, 0x2E2F => Script::Common, 0x2E30..=0x2E39 => Script::Common, 0x2E3A..=0x2E3B => Script::Common, 0x2E3C..=0x2E3F => Script::Common, 0x2E40 => Script::Common, 0x2E41 => Script::Common, 0x2E42 => Script::Common, 0x2E43..=0x2E44 => Script::Common, 0x2FF0..=0x2FFB => Script::Common, 0x3000 => Script::Common, 0x3001..=0x3003 => Script::Common, 0x3004 => Script::Common, 0x3006 => Script::Common, 0x3008 => Script::Common, 0x3009 => Script::Common, 0x300A => Script::Common, 0x300B => Script::Common, 0x300C => Script::Common, 0x300D => Script::Common, 0x300E => Script::Common, 0x300F => Script::Common, 0x3010 => Script::Common, 0x3011 => Script::Common, 0x3012..=0x3013 => Script::Common, 0x3014 => Script::Common, 0x3015 => Script::Common, 0x3016 => Script::Common, 0x3017 => Script::Common, 0x3018 => Script::Common, 0x3019 => Script::Common, 0x301A => Script::Common, 0x301B => Script::Common, 0x301C => Script::Common, 0x301D => Script::Common, 0x301E..=0x301F => Script::Common, 0x3020 => Script::Common, 0x3030 => Script::Common, 0x3031..=0x3035 => Script::Common, 0x3036..=0x3037 => Script::Common, 0x303C => Script::Common, 0x303D => Script::Common, 0x303E..=0x303F => Script::Common, 0x309B..=0x309C => Script::Common, 0x30A0 => Script::Common, 0x30FB => Script::Common, 0x30FC => Script::Common, 0x3190..=0x3191 => Script::Common, 0x3192..=0x3195 => Script::Common, 0x3196..=0x319F => Script::Common, 0x31C0..=0x31E3 => Script::Common, 0x3220..=0x3229 => Script::Common, 0x322A..=0x3247 => Script::Common, 0x3248..=0x324F => Script::Common, 0x3250 => Script::Common, 0x3251..=0x325F => Script::Common, 0x327F => Script::Common, 0x3280..=0x3289 => Script::Common, 0x328A..=0x32B0 => Script::Common, 0x32B1..=0x32BF => Script::Common, 0x32C0..=0x32CF => Script::Common, 0x3358..=0x33FF => Script::Common, 0x4DC0..=0x4DFF => Script::Common, 0xA700..=0xA716 => Script::Common, 0xA717..=0xA71F => Script::Common, 0xA720..=0xA721 => Script::Common, 0xA788 => Script::Common, 0xA789..=0xA78A => Script::Common, 0xA830..=0xA835 => Script::Common, 0xA836..=0xA837 => Script::Common, 0xA838 => Script::Common, 0xA839 => Script::Common, 0xA92E => Script::Common, 0xA9CF => Script::Common, 0xAB5B => Script::Common, 0xFD3E => Script::Common, 0xFD3F => Script::Common, 0xFE10..=0xFE16 => Script::Common, 0xFE17 => Script::Common, 0xFE18 => Script::Common, 0xFE19 => Script::Common, 0xFE30 => Script::Common, 0xFE31..=0xFE32 => Script::Common, 0xFE33..=0xFE34 => Script::Common, 0xFE35 => Script::Common, 0xFE36 => Script::Common, 0xFE37 => Script::Common, 0xFE38 => Script::Common, 0xFE39 => Script::Common, 0xFE3A => Script::Common, 0xFE3B => Script::Common, 0xFE3C => Script::Common, 0xFE3D => Script::Common, 0xFE3E => Script::Common, 0xFE3F => Script::Common, 0xFE40 => Script::Common, 0xFE41 => Script::Common, 0xFE42 => Script::Common, 0xFE43 => Script::Common, 0xFE44 => Script::Common, 0xFE45..=0xFE46 => Script::Common, 0xFE47 => Script::Common, 0xFE48 => Script::Common, 0xFE49..=0xFE4C => 
Script::Common, 0xFE4D..=0xFE4F => Script::Common, 0xFE50..=0xFE52 => Script::Common, 0xFE54..=0xFE57 => Script::Common, 0xFE58 => Script::Common, 0xFE59 => Script::Common, 0xFE5A => Script::Common, 0xFE5B => Script::Common, 0xFE5C => Script::Common, 0xFE5D => Script::Common, 0xFE5E => Script::Common, 0xFE5F..=0xFE61 => Script::Common, 0xFE62 => Script::Common, 0xFE63 => Script::Common, 0xFE64..=0xFE66 => Script::Common, 0xFE68 => Script::Common, 0xFE69 => Script::Common, 0xFE6A..=0xFE6B => Script::Common, 0xFEFF => Script::Common, 0xFF01..=0xFF03 => Script::Common, 0xFF04 => Script::Common, 0xFF05..=0xFF07 => Script::Common, 0xFF08 => Script::Common, 0xFF09 => Script::Common, 0xFF0A => Script::Common, 0xFF0B => Script::Common, 0xFF0C => Script::Common, 0xFF0D => Script::Common, 0xFF0E..=0xFF0F => Script::Common, 0xFF10..=0xFF19 => Script::Common, 0xFF1A..=0xFF1B => Script::Common, 0xFF1C..=0xFF1E => Script::Common, 0xFF1F..=0xFF20 => Script::Common, 0xFF3B => Script::Common, 0xFF3C => Script::Common, 0xFF3D => Script::Common, 0xFF3E => Script::Common, 0xFF3F => Script::Common, 0xFF40 => Script::Common, 0xFF5B => Script::Common, 0xFF5C => Script::Common, 0xFF5D => Script::Common, 0xFF5E => Script::Common, 0xFF5F => Script::Common, 0xFF60 => Script::Common, 0xFF61 => Script::Common, 0xFF62 => Script::Common, 0xFF63 => Script::Common, 0xFF64..=0xFF65 => Script::Common, 0xFF70 => Script::Common, 0xFF9E..=0xFF9F => Script::Common, 0xFFE0..=0xFFE1 => Script::Common, 0xFFE2 => Script::Common, 0xFFE3 => Script::Common, 0xFFE4 => Script::Common, 0xFFE5..=0xFFE6 => Script::Common, 0xFFE8 => Script::Common, 0xFFE9..=0xFFEC => Script::Common, 0xFFED..=0xFFEE => Script::Common, 0xFFF9..=0xFFFB => Script::Common, 0xFFFC..=0xFFFD => Script::Common, 0x10100..=0x10102 => Script::Common, 0x10107..=0x10133 => Script::Common, 0x10137..=0x1013F => Script::Common, 0x10190..=0x1019B => Script::Common, 0x101D0..=0x101FC => Script::Common, 0x102E1..=0x102FB => Script::Common, 0x1BCA0..=0x1BCA3 => Script::Common, 0x1D000..=0x1D0F5 => Script::Common, 0x1D100..=0x1D126 => Script::Common, 0x1D129..=0x1D164 => Script::Common, 0x1D165..=0x1D166 => Script::Common, 0x1D16A..=0x1D16C => Script::Common, 0x1D16D..=0x1D172 => Script::Common, 0x1D173..=0x1D17A => Script::Common, 0x1D183..=0x1D184 => Script::Common, 0x1D18C..=0x1D1A9 => Script::Common, 0x1D1AE..=0x1D1E8 => Script::Common, 0x1D300..=0x1D356 => Script::Common, 0x1D360..=0x1D371 => Script::Common, 0x1D400..=0x1D454 => Script::Common, 0x1D456..=0x1D49C => Script::Common, 0x1D49E..=0x1D49F => Script::Common, 0x1D4A2 => Script::Common, 0x1D4A5..=0x1D4A6 => Script::Common, 0x1D4A9..=0x1D4AC => Script::Common, 0x1D4AE..=0x1D4B9 => Script::Common, 0x1D4BB => Script::Common, 0x1D4BD..=0x1D4C3 => Script::Common, 0x1D4C5..=0x1D505 => Script::Common, 0x1D507..=0x1D50A => Script::Common, 0x1D50D..=0x1D514 => Script::Common, 0x1D516..=0x1D51C => Script::Common, 0x1D51E..=0x1D539 => Script::Common, 0x1D53B..=0x1D53E => Script::Common, 0x1D540..=0x1D544 => Script::Common, 0x1D546 => Script::Common, 0x1D54A..=0x1D550 => Script::Common, 0x1D552..=0x1D6A5 => Script::Common, 0x1D6A8..=0x1D6C0 => Script::Common, 0x1D6C1 => Script::Common, 0x1D6C2..=0x1D6DA => Script::Common, 0x1D6DB => Script::Common, 0x1D6DC..=0x1D6FA => Script::Common, 0x1D6FB => Script::Common, 0x1D6FC..=0x1D714 => Script::Common, 0x1D715 => Script::Common, 0x1D716..=0x1D734 => Script::Common, 0x1D735 => Script::Common, 0x1D736..=0x1D74E => Script::Common, 0x1D74F => Script::Common, 0x1D750..=0x1D76E => 
Script::Common, 0x1D76F => Script::Common, 0x1D770..=0x1D788 => Script::Common, 0x1D789 => Script::Common, 0x1D78A..=0x1D7A8 => Script::Common, 0x1D7A9 => Script::Common, 0x1D7AA..=0x1D7C2 => Script::Common, 0x1D7C3 => Script::Common, 0x1D7C4..=0x1D7CB => Script::Common, 0x1D7CE..=0x1D7FF => Script::Common, 0x1F000..=0x1F02B => Script::Common, 0x1F030..=0x1F093 => Script::Common, 0x1F0A0..=0x1F0AE => Script::Common, 0x1F0B1..=0x1F0BF => Script::Common, 0x1F0C1..=0x1F0CF => Script::Common, 0x1F0D1..=0x1F0F5 => Script::Common, 0x1F100..=0x1F10C => Script::Common, 0x1F110..=0x1F12E => Script::Common, 0x1F130..=0x1F16B => Script::Common, 0x1F170..=0x1F1AC => Script::Common, 0x1F1E6..=0x1F1FF => Script::Common, 0x1F201..=0x1F202 => Script::Common, 0x1F210..=0x1F23B => Script::Common, 0x1F240..=0x1F248 => Script::Common, 0x1F250..=0x1F251 => Script::Common, 0x1F300..=0x1F3FA => Script::Common, 0x1F3FB..=0x1F3FF => Script::Common, 0x1F400..=0x1F6D2 => Script::Common, 0x1F6E0..=0x1F6EC => Script::Common, 0x1F6F0..=0x1F6F6 => Script::Common, 0x1F700..=0x1F773 => Script::Common, 0x1F780..=0x1F7D4 => Script::Common, 0x1F800..=0x1F80B => Script::Common, 0x1F810..=0x1F847 => Script::Common, 0x1F850..=0x1F859 => Script::Common, 0x1F860..=0x1F887 => Script::Common, 0x1F890..=0x1F8AD => Script::Common, 0x1F910..=0x1F91E => Script::Common, 0x1F920..=0x1F927 => Script::Common, 0x1F930 => Script::Common, 0x1F933..=0x1F93E => Script::Common, 0x1F940..=0x1F94B => Script::Common, 0x1F950..=0x1F95E => Script::Common, 0x1F980..=0x1F991 => Script::Common, 0x1F9C0 => Script::Common, 0xE0001 => Script::Common, 0xE0020..=0xE007F => Script::Common, 0x0041..=0x005A => Script::Latin, 0x0061..=0x007A => Script::Latin, 0x00AA => Script::Latin, 0x00BA => Script::Latin, 0x00C0..=0x00D6 => Script::Latin, 0x00D8..=0x00F6 => Script::Latin, 0x00F8..=0x01BA => Script::Latin, 0x01BB => Script::Latin, 0x01BC..=0x01BF => Script::Latin, 0x01C0..=0x01C3 => Script::Latin, 0x01C4..=0x0293 => Script::Latin, 0x0294 => Script::Latin, 0x0295..=0x02AF => Script::Latin, 0x02B0..=0x02B8 => Script::Latin, 0x02E0..=0x02E4 => Script::Latin, 0x1D00..=0x1D25 => Script::Latin, 0x1D2C..=0x1D5C => Script::Latin, 0x1D62..=0x1D65 => Script::Latin, 0x1D6B..=0x1D77 => Script::Latin, 0x1D79..=0x1D9A => Script::Latin, 0x1D9B..=0x1DBE => Script::Latin, 0x1E00..=0x1EFF => Script::Latin, 0x2071 => Script::Latin, 0x207F => Script::Latin, 0x2090..=0x209C => Script::Latin, 0x212A..=0x212B => Script::Latin, 0x2132 => Script::Latin, 0x214E => Script::Latin, 0x2160..=0x2182 => Script::Latin, 0x2183..=0x2184 => Script::Latin, 0x2185..=0x2188 => Script::Latin, 0x2C60..=0x2C7B => Script::Latin, 0x2C7C..=0x2C7D => Script::Latin, 0x2C7E..=0x2C7F => Script::Latin, 0xA722..=0xA76F => Script::Latin, 0xA770 => Script::Latin, 0xA771..=0xA787 => Script::Latin, 0xA78B..=0xA78E => Script::Latin, 0xA78F => Script::Latin, 0xA790..=0xA7AE => Script::Latin, 0xA7B0..=0xA7B7 => Script::Latin, 0xA7F7 => Script::Latin, 0xA7F8..=0xA7F9 => Script::Latin, 0xA7FA => Script::Latin, 0xA7FB..=0xA7FF => Script::Latin, 0xAB30..=0xAB5A => Script::Latin, 0xAB5C..=0xAB5F => Script::Latin, 0xAB60..=0xAB64 => Script::Latin, 0xFB00..=0xFB06 => Script::Latin, 0xFF21..=0xFF3A => Script::Latin, 0xFF41..=0xFF5A => Script::Latin, 0x0370..=0x0373 => Script::Greek, 0x0375 => Script::Greek, 0x0376..=0x0377 => Script::Greek, 0x037A => Script::Greek, 0x037B..=0x037D => Script::Greek, 0x037F => Script::Greek, 0x0384 => Script::Greek, 0x0386 => Script::Greek, 0x0388..=0x038A => Script::Greek, 0x038C => 
Script::Greek, 0x038E..=0x03A1 => Script::Greek, 0x03A3..=0x03E1 => Script::Greek, 0x03F0..=0x03F5 => Script::Greek, 0x03F6 => Script::Greek, 0x03F7..=0x03FF => Script::Greek, 0x1D26..=0x1D2A => Script::Greek, 0x1D5D..=0x1D61 => Script::Greek, 0x1D66..=0x1D6A => Script::Greek, 0x1DBF => Script::Greek, 0x1F00..=0x1F15 => Script::Greek, 0x1F18..=0x1F1D => Script::Greek, 0x1F20..=0x1F45 => Script::Greek, 0x1F48..=0x1F4D => Script::Greek, 0x1F50..=0x1F57 => Script::Greek, 0x1F59 => Script::Greek, 0x1F5B => Script::Greek, 0x1F5D => Script::Greek, 0x1F5F..=0x1F7D => Script::Greek, 0x1F80..=0x1FB4 => Script::Greek, 0x1FB6..=0x1FBC => Script::Greek, 0x1FBD => Script::Greek, 0x1FBE => Script::Greek, 0x1FBF..=0x1FC1 => Script::Greek, 0x1FC2..=0x1FC4 => Script::Greek, 0x1FC6..=0x1FCC => Script::Greek, 0x1FCD..=0x1FCF => Script::Greek, 0x1FD0..=0x1FD3 => Script::Greek, 0x1FD6..=0x1FDB => Script::Greek, 0x1FDD..=0x1FDF => Script::Greek, 0x1FE0..=0x1FEC => Script::Greek, 0x1FED..=0x1FEF => Script::Greek, 0x1FF2..=0x1FF4 => Script::Greek, 0x1FF6..=0x1FFC => Script::Greek, 0x1FFD..=0x1FFE => Script::Greek, 0x2126 => Script::Greek, 0xAB65 => Script::Greek, 0x10140..=0x10174 => Script::Greek, 0x10175..=0x10178 => Script::Greek, 0x10179..=0x10189 => Script::Greek, 0x1018A..=0x1018B => Script::Greek, 0x1018C..=0x1018E => Script::Greek, 0x101A0 => Script::Greek, 0x1D200..=0x1D241 => Script::Greek, 0x1D242..=0x1D244 => Script::Greek, 0x1D245 => Script::Greek, 0x0400..=0x0481 => Script::Cyrillic, 0x0482 => Script::Cyrillic, 0x0483..=0x0484 => Script::Cyrillic, 0x0487 => Script::Cyrillic, 0x0488..=0x0489 => Script::Cyrillic, 0x048A..=0x052F => Script::Cyrillic, 0x1C80..=0x1C88 => Script::Cyrillic, 0x1D2B => Script::Cyrillic, 0x1D78 => Script::Cyrillic, 0x2DE0..=0x2DFF => Script::Cyrillic, 0xA640..=0xA66D => Script::Cyrillic, 0xA66E => Script::Cyrillic, 0xA66F => Script::Cyrillic, 0xA670..=0xA672 => Script::Cyrillic, 0xA673 => Script::Cyrillic, 0xA674..=0xA67D => Script::Cyrillic, 0xA67E => Script::Cyrillic, 0xA67F => Script::Cyrillic, 0xA680..=0xA69B => Script::Cyrillic, 0xA69C..=0xA69D => Script::Cyrillic, 0xA69E..=0xA69F => Script::Cyrillic, 0xFE2E..=0xFE2F => Script::Cyrillic, 0x0531..=0x0556 => Script::Armenian, 0x0559 => Script::Armenian, 0x055A..=0x055F => Script::Armenian, 0x0561..=0x0587 => Script::Armenian, 0x058A => Script::Armenian, 0x058D..=0x058E => Script::Armenian, 0x058F => Script::Armenian, 0xFB13..=0xFB17 => Script::Armenian, 0x0591..=0x05BD => Script::Hebrew, 0x05BE => Script::Hebrew, 0x05BF => Script::Hebrew, 0x05C0 => Script::Hebrew, 0x05C1..=0x05C2 => Script::Hebrew, 0x05C3 => Script::Hebrew, 0x05C4..=0x05C5 => Script::Hebrew, 0x05C6 => Script::Hebrew, 0x05C7 => Script::Hebrew, 0x05D0..=0x05EA => Script::Hebrew, 0x05F0..=0x05F2 => Script::Hebrew, 0x05F3..=0x05F4 => Script::Hebrew, 0xFB1D => Script::Hebrew, 0xFB1E => Script::Hebrew, 0xFB1F..=0xFB28 => Script::Hebrew, 0xFB29 => Script::Hebrew, 0xFB2A..=0xFB36 => Script::Hebrew, 0xFB38..=0xFB3C => Script::Hebrew, 0xFB3E => Script::Hebrew, 0xFB40..=0xFB41 => Script::Hebrew, 0xFB43..=0xFB44 => Script::Hebrew, 0xFB46..=0xFB4F => Script::Hebrew, 0x0600..=0x0604 => Script::Arabic, 0x0606..=0x0608 => Script::Arabic, 0x0609..=0x060A => Script::Arabic, 0x060B => Script::Arabic, 0x060D => Script::Arabic, 0x060E..=0x060F => Script::Arabic, 0x0610..=0x061A => Script::Arabic, 0x061E => Script::Arabic, 0x0620..=0x063F => Script::Arabic, 0x0641..=0x064A => Script::Arabic, 0x0656..=0x065F => Script::Arabic, 0x0660..=0x0669 => Script::Arabic, 0x066A..=0x066D 
=> Script::Arabic, 0x066E..=0x066F => Script::Arabic, 0x0671..=0x06D3 => Script::Arabic, 0x06D4 => Script::Arabic, 0x06D5 => Script::Arabic, 0x06D6..=0x06DC => Script::Arabic, 0x06DE => Script::Arabic, 0x06DF..=0x06E4 => Script::Arabic, 0x06E5..=0x06E6 => Script::Arabic, 0x06E7..=0x06E8 => Script::Arabic, 0x06E9 => Script::Arabic, 0x06EA..=0x06ED => Script::Arabic, 0x06EE..=0x06EF => Script::Arabic, 0x06F0..=0x06F9 => Script::Arabic, 0x06FA..=0x06FC => Script::Arabic, 0x06FD..=0x06FE => Script::Arabic, 0x06FF => Script::Arabic, 0x0750..=0x077F => Script::Arabic, 0x08A0..=0x08B4 => Script::Arabic, 0x08B6..=0x08BD => Script::Arabic, 0x08D4..=0x08E1 => Script::Arabic, 0x08E3..=0x08FF => Script::Arabic, 0xFB50..=0xFBB1 => Script::Arabic, 0xFBB2..=0xFBC1 => Script::Arabic, 0xFBD3..=0xFD3D => Script::Arabic, 0xFD50..=0xFD8F => Script::Arabic, 0xFD92..=0xFDC7 => Script::Arabic, 0xFDF0..=0xFDFB => Script::Arabic, 0xFDFC => Script::Arabic, 0xFDFD => Script::Arabic, 0xFE70..=0xFE74 => Script::Arabic, 0xFE76..=0xFEFC => Script::Arabic, 0x10E60..=0x10E7E => Script::Arabic, 0x1EE00..=0x1EE03 => Script::Arabic, 0x1EE05..=0x1EE1F => Script::Arabic, 0x1EE21..=0x1EE22 => Script::Arabic, 0x1EE24 => Script::Arabic, 0x1EE27 => Script::Arabic, 0x1EE29..=0x1EE32 => Script::Arabic, 0x1EE34..=0x1EE37 => Script::Arabic, 0x1EE39 => Script::Arabic, 0x1EE3B => Script::Arabic, 0x1EE42 => Script::Arabic, 0x1EE47 => Script::Arabic, 0x1EE49 => Script::Arabic, 0x1EE4B => Script::Arabic, 0x1EE4D..=0x1EE4F => Script::Arabic, 0x1EE51..=0x1EE52 => Script::Arabic, 0x1EE54 => Script::Arabic, 0x1EE57 => Script::Arabic, 0x1EE59 => Script::Arabic, 0x1EE5B => Script::Arabic, 0x1EE5D => Script::Arabic, 0x1EE5F => Script::Arabic, 0x1EE61..=0x1EE62 => Script::Arabic, 0x1EE64 => Script::Arabic, 0x1EE67..=0x1EE6A => Script::Arabic, 0x1EE6C..=0x1EE72 => Script::Arabic, 0x1EE74..=0x1EE77 => Script::Arabic, 0x1EE79..=0x1EE7C => Script::Arabic, 0x1EE7E => Script::Arabic, 0x1EE80..=0x1EE89 => Script::Arabic, 0x1EE8B..=0x1EE9B => Script::Arabic, 0x1EEA1..=0x1EEA3 => Script::Arabic, 0x1EEA5..=0x1EEA9 => Script::Arabic, 0x1EEAB..=0x1EEBB => Script::Arabic, 0x1EEF0..=0x1EEF1 => Script::Arabic, 0x0700..=0x070D => Script::Syriac, 0x070F => Script::Syriac, 0x0710 => Script::Syriac, 0x0711 => Script::Syriac, 0x0712..=0x072F => Script::Syriac, 0x0730..=0x074A => Script::Syriac, 0x074D..=0x074F => Script::Syriac, 0x0780..=0x07A5 => Script::Thaana, 0x07A6..=0x07B0 => Script::Thaana, 0x07B1 => Script::Thaana, 0x0900..=0x0902 => Script::Devanagari, 0x0903 => Script::Devanagari, 0x0904..=0x0939 => Script::Devanagari, 0x093A => Script::Devanagari, 0x093B => Script::Devanagari, 0x093C => Script::Devanagari, 0x093D => Script::Devanagari, 0x093E..=0x0940 => Script::Devanagari, 0x0941..=0x0948 => Script::Devanagari, 0x0949..=0x094C => Script::Devanagari, 0x094D => Script::Devanagari, 0x094E..=0x094F => Script::Devanagari, 0x0950 => Script::Devanagari, 0x0953..=0x0957 => Script::Devanagari, 0x0958..=0x0961 => Script::Devanagari, 0x0962..=0x0963 => Script::Devanagari, 0x0966..=0x096F => Script::Devanagari, 0x0970 => Script::Devanagari, 0x0971 => Script::Devanagari, 0x0972..=0x097F => Script::Devanagari, 0xA8E0..=0xA8F1 => Script::Devanagari, 0xA8F2..=0xA8F7 => Script::Devanagari, 0xA8F8..=0xA8FA => Script::Devanagari, 0xA8FB => Script::Devanagari, 0xA8FC => Script::Devanagari, 0xA8FD => Script::Devanagari, 0x0980 => Script::Bengali, 0x0981 => Script::Bengali, 0x0982..=0x0983 => Script::Bengali, 0x0985..=0x098C => Script::Bengali, 0x098F..=0x0990 => 
Script::Bengali, 0x0993..=0x09A8 => Script::Bengali, 0x09AA..=0x09B0 => Script::Bengali, 0x09B2 => Script::Bengali, 0x09B6..=0x09B9 => Script::Bengali, 0x09BC => Script::Bengali, 0x09BD => Script::Bengali, 0x09BE..=0x09C0 => Script::Bengali, 0x09C1..=0x09C4 => Script::Bengali, 0x09C7..=0x09C8 => Script::Bengali, 0x09CB..=0x09CC => Script::Bengali, 0x09CD => Script::Bengali, 0x09CE => Script::Bengali, 0x09D7 => Script::Bengali, 0x09DC..=0x09DD => Script::Bengali, 0x09DF..=0x09E1 => Script::Bengali, 0x09E2..=0x09E3 => Script::Bengali, 0x09E6..=0x09EF => Script::Bengali, 0x09F0..=0x09F1 => Script::Bengali, 0x09F2..=0x09F3 => Script::Bengali, 0x09F4..=0x09F9 => Script::Bengali, 0x09FA => Script::Bengali, 0x09FB => Script::Bengali, 0x0A01..=0x0A02 => Script::Gurmukhi, 0x0A03 => Script::Gurmukhi, 0x0A05..=0x0A0A => Script::Gurmukhi, 0x0A0F..=0x0A10 => Script::Gurmukhi, 0x0A13..=0x0A28 => Script::Gurmukhi, 0x0A2A..=0x0A30 => Script::Gurmukhi, 0x0A32..=0x0A33 => Script::Gurmukhi, 0x0A35..=0x0A36 => Script::Gurmukhi, 0x0A38..=0x0A39 => Script::Gurmukhi, 0x0A3C => Script::Gurmukhi, 0x0A3E..=0x0A40 => Script::Gurmukhi, 0x0A41..=0x0A42 => Script::Gurmukhi, 0x0A47..=0x0A48 => Script::Gurmukhi, 0x0A4B..=0x0A4D => Script::Gurmukhi, 0x0A51 => Script::Gurmukhi, 0x0A59..=0x0A5C => Script::Gurmukhi, 0x0A5E => Script::Gurmukhi, 0x0A66..=0x0A6F => Script::Gurmukhi, 0x0A70..=0x0A71 => Script::Gurmukhi, 0x0A72..=0x0A74 => Script::Gurmukhi, 0x0A75 => Script::Gurmukhi, 0x0A81..=0x0A82 => Script::Gujarati, 0x0A83 => Script::Gujarati, 0x0A85..=0x0A8D => Script::Gujarati, 0x0A8F..=0x0A91 => Script::Gujarati, 0x0A93..=0x0AA8 => Script::Gujarati, 0x0AAA..=0x0AB0 => Script::Gujarati, 0x0AB2..=0x0AB3 => Script::Gujarati, 0x0AB5..=0x0AB9 => Script::Gujarati, 0x0ABC => Script::Gujarati, 0x0ABD => Script::Gujarati, 0x0ABE..=0x0AC0 => Script::Gujarati, 0x0AC1..=0x0AC5 => Script::Gujarati, 0x0AC7..=0x0AC8 => Script::Gujarati, 0x0AC9 => Script::Gujarati, 0x0ACB..=0x0ACC => Script::Gujarati, 0x0ACD => Script::Gujarati, 0x0AD0 => Script::Gujarati, 0x0AE0..=0x0AE1 => Script::Gujarati, 0x0AE2..=0x0AE3 => Script::Gujarati, 0x0AE6..=0x0AEF => Script::Gujarati, 0x0AF0 => Script::Gujarati, 0x0AF1 => Script::Gujarati, 0x0AF9 => Script::Gujarati, 0x0B01 => Script::Oriya, 0x0B02..=0x0B03 => Script::Oriya, 0x0B05..=0x0B0C => Script::Oriya, 0x0B0F..=0x0B10 => Script::Oriya, 0x0B13..=0x0B28 => Script::Oriya, 0x0B2A..=0x0B30 => Script::Oriya, 0x0B32..=0x0B33 => Script::Oriya, 0x0B35..=0x0B39 => Script::Oriya, 0x0B3C => Script::Oriya, 0x0B3D => Script::Oriya, 0x0B3E => Script::Oriya, 0x0B3F => Script::Oriya, 0x0B40 => Script::Oriya, 0x0B41..=0x0B44 => Script::Oriya, 0x0B47..=0x0B48 => Script::Oriya, 0x0B4B..=0x0B4C => Script::Oriya, 0x0B4D => Script::Oriya, 0x0B56 => Script::Oriya, 0x0B57 => Script::Oriya, 0x0B5C..=0x0B5D => Script::Oriya, 0x0B5F..=0x0B61 => Script::Oriya, 0x0B62..=0x0B63 => Script::Oriya, 0x0B66..=0x0B6F => Script::Oriya, 0x0B70 => Script::Oriya, 0x0B71 => Script::Oriya, 0x0B72..=0x0B77 => Script::Oriya, 0x0B82 => Script::Tamil, 0x0B83 => Script::Tamil, 0x0B85..=0x0B8A => Script::Tamil, 0x0B8E..=0x0B90 => Script::Tamil, 0x0B92..=0x0B95 => Script::Tamil, 0x0B99..=0x0B9A => Script::Tamil, 0x0B9C => Script::Tamil, 0x0B9E..=0x0B9F => Script::Tamil, 0x0BA3..=0x0BA4 => Script::Tamil, 0x0BA8..=0x0BAA => Script::Tamil, 0x0BAE..=0x0BB9 => Script::Tamil, 0x0BBE..=0x0BBF => Script::Tamil, 0x0BC0 => Script::Tamil, 0x0BC1..=0x0BC2 => Script::Tamil, 0x0BC6..=0x0BC8 => Script::Tamil, 0x0BCA..=0x0BCC => Script::Tamil, 0x0BCD => 
Script::Tamil, 0x0BD0 => Script::Tamil, 0x0BD7 => Script::Tamil, 0x0BE6..=0x0BEF => Script::Tamil, 0x0BF0..=0x0BF2 => Script::Tamil, 0x0BF3..=0x0BF8 => Script::Tamil, 0x0BF9 => Script::Tamil, 0x0BFA => Script::Tamil, 0x0C00 => Script::Telugu, 0x0C01..=0x0C03 => Script::Telugu, 0x0C05..=0x0C0C => Script::Telugu, 0x0C0E..=0x0C10 => Script::Telugu, 0x0C12..=0x0C28 => Script::Telugu, 0x0C2A..=0x0C39 => Script::Telugu, 0x0C3D => Script::Telugu, 0x0C3E..=0x0C40 => Script::Telugu, 0x0C41..=0x0C44 => Script::Telugu, 0x0C46..=0x0C48 => Script::Telugu, 0x0C4A..=0x0C4D => Script::Telugu, 0x0C55..=0x0C56 => Script::Telugu, 0x0C58..=0x0C5A => Script::Telugu, 0x0C60..=0x0C61 => Script::Telugu, 0x0C62..=0x0C63 => Script::Telugu, 0x0C66..=0x0C6F => Script::Telugu, 0x0C78..=0x0C7E => Script::Telugu, 0x0C7F => Script::Telugu, 0x0C80 => Script::Kannada, 0x0C81 => Script::Kannada, 0x0C82..=0x0C83 => Script::Kannada, 0x0C85..=0x0C8C => Script::Kannada, 0x0C8E..=0x0C90 => Script::Kannada, 0x0C92..=0x0CA8 => Script::Kannada, 0x0CAA..=0x0CB3 => Script::Kannada, 0x0CB5..=0x0CB9 => Script::Kannada, 0x0CBC => Script::Kannada, 0x0CBD => Script::Kannada, 0x0CBE => Script::Kannada, 0x0CBF => Script::Kannada, 0x0CC0..=0x0CC4 => Script::Kannada, 0x0CC6 => Script::Kannada, 0x0CC7..=0x0CC8 => Script::Kannada, 0x0CCA..=0x0CCB => Script::Kannada, 0x0CCC..=0x0CCD => Script::Kannada, 0x0CD5..=0x0CD6 => Script::Kannada, 0x0CDE => Script::Kannada, 0x0CE0..=0x0CE1 => Script::Kannada, 0x0CE2..=0x0CE3 => Script::Kannada, 0x0CE6..=0x0CEF => Script::Kannada, 0x0CF1..=0x0CF2 => Script::Kannada, 0x0D01 => Script::Malayalam, 0x0D02..=0x0D03 => Script::Malayalam, 0x0D05..=0x0D0C => Script::Malayalam, 0x0D0E..=0x0D10 => Script::Malayalam, 0x0D12..=0x0D3A => Script::Malayalam, 0x0D3D => Script::Malayalam, 0x0D3E..=0x0D40 => Script::Malayalam, 0x0D41..=0x0D44 => Script::Malayalam, 0x0D46..=0x0D48 => Script::Malayalam, 0x0D4A..=0x0D4C => Script::Malayalam, 0x0D4D => Script::Malayalam, 0x0D4E => Script::Malayalam, 0x0D4F => Script::Malayalam, 0x0D54..=0x0D56 => Script::Malayalam, 0x0D57 => Script::Malayalam, 0x0D58..=0x0D5E => Script::Malayalam, 0x0D5F..=0x0D61 => Script::Malayalam, 0x0D62..=0x0D63 => Script::Malayalam, 0x0D66..=0x0D6F => Script::Malayalam, 0x0D70..=0x0D78 => Script::Malayalam, 0x0D79 => Script::Malayalam, 0x0D7A..=0x0D7F => Script::Malayalam, 0x0D82..=0x0D83 => Script::Sinhala, 0x0D85..=0x0D96 => Script::Sinhala, 0x0D9A..=0x0DB1 => Script::Sinhala, 0x0DB3..=0x0DBB => Script::Sinhala, 0x0DBD => Script::Sinhala, 0x0DC0..=0x0DC6 => Script::Sinhala, 0x0DCA => Script::Sinhala, 0x0DCF..=0x0DD1 => Script::Sinhala, 0x0DD2..=0x0DD4 => Script::Sinhala, 0x0DD6 => Script::Sinhala, 0x0DD8..=0x0DDF => Script::Sinhala, 0x0DE6..=0x0DEF => Script::Sinhala, 0x0DF2..=0x0DF3 => Script::Sinhala, 0x0DF4 => Script::Sinhala, 0x111E1..=0x111F4 => Script::Sinhala, 0x0E01..=0x0E30 => Script::Thai, 0x0E31 => Script::Thai, 0x0E32..=0x0E33 => Script::Thai, 0x0E34..=0x0E3A => Script::Thai, 0x0E40..=0x0E45 => Script::Thai, 0x0E46 => Script::Thai, 0x0E47..=0x0E4E => Script::Thai, 0x0E4F => Script::Thai, 0x0E50..=0x0E59 => Script::Thai, 0x0E5A..=0x0E5B => Script::Thai, 0x0E81..=0x0E82 => Script::Lao, 0x0E84 => Script::Lao, 0x0E87..=0x0E88 => Script::Lao, 0x0E8A => Script::Lao, 0x0E8D => Script::Lao, 0x0E94..=0x0E97 => Script::Lao, 0x0E99..=0x0E9F => Script::Lao, 0x0EA1..=0x0EA3 => Script::Lao, 0x0EA5 => Script::Lao, 0x0EA7 => Script::Lao, 0x0EAA..=0x0EAB => Script::Lao, 0x0EAD..=0x0EB0 => Script::Lao, 0x0EB1 => Script::Lao, 0x0EB2..=0x0EB3 => Script::Lao, 
0x0EB4..=0x0EB9 => Script::Lao, 0x0EBB..=0x0EBC => Script::Lao, 0x0EBD => Script::Lao, 0x0EC0..=0x0EC4 => Script::Lao, 0x0EC6 => Script::Lao, 0x0EC8..=0x0ECD => Script::Lao, 0x0ED0..=0x0ED9 => Script::Lao, 0x0EDC..=0x0EDF => Script::Lao, 0x0F00 => Script::Tibetan, 0x0F01..=0x0F03 => Script::Tibetan, 0x0F04..=0x0F12 => Script::Tibetan, 0x0F13 => Script::Tibetan, 0x0F14 => Script::Tibetan, 0x0F15..=0x0F17 => Script::Tibetan, 0x0F18..=0x0F19 => Script::Tibetan, 0x0F1A..=0x0F1F => Script::Tibetan, 0x0F20..=0x0F29 => Script::Tibetan, 0x0F2A..=0x0F33 => Script::Tibetan, 0x0F34 => Script::Tibetan, 0x0F35 => Script::Tibetan, 0x0F36 => Script::Tibetan, 0x0F37 => Script::Tibetan, 0x0F38 => Script::Tibetan, 0x0F39 => Script::Tibetan, 0x0F3A => Script::Tibetan, 0x0F3B => Script::Tibetan, 0x0F3C => Script::Tibetan, 0x0F3D => Script::Tibetan, 0x0F3E..=0x0F3F => Script::Tibetan, 0x0F40..=0x0F47 => Script::Tibetan, 0x0F49..=0x0F6C => Script::Tibetan, 0x0F71..=0x0F7E => Script::Tibetan, 0x0F7F => Script::Tibetan, 0x0F80..=0x0F84 => Script::Tibetan, 0x0F85 => Script::Tibetan, 0x0F86..=0x0F87 => Script::Tibetan, 0x0F88..=0x0F8C => Script::Tibetan, 0x0F8D..=0x0F97 => Script::Tibetan, 0x0F99..=0x0FBC => Script::Tibetan, 0x0FBE..=0x0FC5 => Script::Tibetan, 0x0FC6 => Script::Tibetan, 0x0FC7..=0x0FCC => Script::Tibetan, 0x0FCE..=0x0FCF => Script::Tibetan, 0x0FD0..=0x0FD4 => Script::Tibetan, 0x0FD9..=0x0FDA => Script::Tibetan, 0x1000..=0x102A => Script::Myanmar, 0x102B..=0x102C => Script::Myanmar, 0x102D..=0x1030 => Script::Myanmar, 0x1031 => Script::Myanmar, 0x1032..=0x1037 => Script::Myanmar, 0x1038 => Script::Myanmar, 0x1039..=0x103A => Script::Myanmar, 0x103B..=0x103C => Script::Myanmar, 0x103D..=0x103E => Script::Myanmar, 0x103F => Script::Myanmar, 0x1040..=0x1049 => Script::Myanmar, 0x104A..=0x104F => Script::Myanmar, 0x1050..=0x1055 => Script::Myanmar, 0x1056..=0x1057 => Script::Myanmar, 0x1058..=0x1059 => Script::Myanmar, 0x105A..=0x105D => Script::Myanmar, 0x105E..=0x1060 => Script::Myanmar, 0x1061 => Script::Myanmar, 0x1062..=0x1064 => Script::Myanmar, 0x1065..=0x1066 => Script::Myanmar, 0x1067..=0x106D => Script::Myanmar, 0x106E..=0x1070 => Script::Myanmar, 0x1071..=0x1074 => Script::Myanmar, 0x1075..=0x1081 => Script::Myanmar, 0x1082 => Script::Myanmar, 0x1083..=0x1084 => Script::Myanmar, 0x1085..=0x1086 => Script::Myanmar, 0x1087..=0x108C => Script::Myanmar, 0x108D => Script::Myanmar, 0x108E => Script::Myanmar, 0x108F => Script::Myanmar, 0x1090..=0x1099 => Script::Myanmar, 0x109A..=0x109C => Script::Myanmar, 0x109D => Script::Myanmar, 0x109E..=0x109F => Script::Myanmar, 0xA9E0..=0xA9E4 => Script::Myanmar, 0xA9E5 => Script::Myanmar, 0xA9E6 => Script::Myanmar, 0xA9E7..=0xA9EF => Script::Myanmar, 0xA9F0..=0xA9F9 => Script::Myanmar, 0xA9FA..=0xA9FE => Script::Myanmar, 0xAA60..=0xAA6F => Script::Myanmar, 0xAA70 => Script::Myanmar, 0xAA71..=0xAA76 => Script::Myanmar, 0xAA77..=0xAA79 => Script::Myanmar, 0xAA7A => Script::Myanmar, 0xAA7B => Script::Myanmar, 0xAA7C => Script::Myanmar, 0xAA7D => Script::Myanmar, 0xAA7E..=0xAA7F => Script::Myanmar, 0x10A0..=0x10C5 => Script::Georgian, 0x10C7 => Script::Georgian, 0x10CD => Script::Georgian, 0x10D0..=0x10FA => Script::Georgian, 0x10FC => Script::Georgian, 0x10FD..=0x10FF => Script::Georgian, 0x2D00..=0x2D25 => Script::Georgian, 0x2D27 => Script::Georgian, 0x2D2D => Script::Georgian, 0x1100..=0x11FF => Script::Hangul, 0x302E..=0x302F => Script::Hangul, 0x3131..=0x318E => Script::Hangul, 0x3200..=0x321E => Script::Hangul, 0x3260..=0x327E => Script::Hangul, 
0xA960..=0xA97C => Script::Hangul, 0xAC00..=0xD7A3 => Script::Hangul, 0xD7B0..=0xD7C6 => Script::Hangul, 0xD7CB..=0xD7FB => Script::Hangul, 0xFFA0..=0xFFBE => Script::Hangul, 0xFFC2..=0xFFC7 => Script::Hangul, 0xFFCA..=0xFFCF => Script::Hangul, 0xFFD2..=0xFFD7 => Script::Hangul, 0xFFDA..=0xFFDC => Script::Hangul, 0x1200..=0x1248 => Script::Ethiopic, 0x124A..=0x124D => Script::Ethiopic, 0x1250..=0x1256 => Script::Ethiopic, 0x1258 => Script::Ethiopic, 0x125A..=0x125D => Script::Ethiopic, 0x1260..=0x1288 => Script::Ethiopic, 0x128A..=0x128D => Script::Ethiopic, 0x1290..=0x12B0 => Script::Ethiopic, 0x12B2..=0x12B5 => Script::Ethiopic, 0x12B8..=0x12BE => Script::Ethiopic, 0x12C0 => Script::Ethiopic, 0x12C2..=0x12C5 => Script::Ethiopic, 0x12C8..=0x12D6 => Script::Ethiopic, 0x12D8..=0x1310 => Script::Ethiopic, 0x1312..=0x1315 => Script::Ethiopic, 0x1318..=0x135A => Script::Ethiopic, 0x135D..=0x135F => Script::Ethiopic, 0x1360..=0x1368 => Script::Ethiopic, 0x1369..=0x137C => Script::Ethiopic, 0x1380..=0x138F => Script::Ethiopic, 0x1390..=0x1399 => Script::Ethiopic, 0x2D80..=0x2D96 => Script::Ethiopic, 0x2DA0..=0x2DA6 => Script::Ethiopic, 0x2DA8..=0x2DAE => Script::Ethiopic, 0x2DB0..=0x2DB6 => Script::Ethiopic, 0x2DB8..=0x2DBE => Script::Ethiopic, 0x2DC0..=0x2DC6 => Script::Ethiopic, 0x2DC8..=0x2DCE => Script::Ethiopic, 0x2DD0..=0x2DD6 => Script::Ethiopic, 0x2DD8..=0x2DDE => Script::Ethiopic, 0xAB01..=0xAB06 => Script::Ethiopic, 0xAB09..=0xAB0E => Script::Ethiopic, 0xAB11..=0xAB16 => Script::Ethiopic, 0xAB20..=0xAB26 => Script::Ethiopic, 0xAB28..=0xAB2E => Script::Ethiopic, 0x13A0..=0x13F5 => Script::Cherokee, 0x13F8..=0x13FD => Script::Cherokee, 0xAB70..=0xABBF => Script::Cherokee, 0x1400 => Script::CanadianAboriginal, 0x1401..=0x166C => Script::CanadianAboriginal, 0x166D..=0x166E => Script::CanadianAboriginal, 0x166F..=0x167F => Script::CanadianAboriginal, 0x18B0..=0x18F5 => Script::CanadianAboriginal, 0x1680 => Script::Ogham, 0x1681..=0x169A => Script::Ogham, 0x169B => Script::Ogham, 0x169C => Script::Ogham, 0x16A0..=0x16EA => Script::Runic, 0x16EE..=0x16F0 => Script::Runic, 0x16F1..=0x16F8 => Script::Runic, 0x1780..=0x17B3 => Script::Khmer, 0x17B4..=0x17B5 => Script::Khmer, 0x17B6 => Script::Khmer, 0x17B7..=0x17BD => Script::Khmer, 0x17BE..=0x17C5 => Script::Khmer, 0x17C6 => Script::Khmer, 0x17C7..=0x17C8 => Script::Khmer, 0x17C9..=0x17D3 => Script::Khmer, 0x17D4..=0x17D6 => Script::Khmer, 0x17D7 => Script::Khmer, 0x17D8..=0x17DA => Script::Khmer, 0x17DB => Script::Khmer, 0x17DC => Script::Khmer, 0x17DD => Script::Khmer, 0x17E0..=0x17E9 => Script::Khmer, 0x17F0..=0x17F9 => Script::Khmer, 0x19E0..=0x19FF => Script::Khmer, 0x1800..=0x1801 => Script::Mongolian, 0x1804 => Script::Mongolian, 0x1806 => Script::Mongolian, 0x1807..=0x180A => Script::Mongolian, 0x180B..=0x180D => Script::Mongolian, 0x180E => Script::Mongolian, 0x1810..=0x1819 => Script::Mongolian, 0x1820..=0x1842 => Script::Mongolian, 0x1843 => Script::Mongolian, 0x1844..=0x1877 => Script::Mongolian, 0x1880..=0x1884 => Script::Mongolian, 0x1885..=0x1886 => Script::Mongolian, 0x1887..=0x18A8 => Script::Mongolian, 0x18A9 => Script::Mongolian, 0x18AA => Script::Mongolian, 0x11660..=0x1166C => Script::Mongolian, 0x3041..=0x3096 => Script::Hiragana, 0x309D..=0x309E => Script::Hiragana, 0x309F => Script::Hiragana, 0x1B001 => Script::Hiragana, 0x1F200 => Script::Hiragana, 0x30A1..=0x30FA => Script::Katakana, 0x30FD..=0x30FE => Script::Katakana, 0x30FF => Script::Katakana, 0x31F0..=0x31FF => Script::Katakana, 0x32D0..=0x32FE => 
Script::Katakana, 0x3300..=0x3357 => Script::Katakana, 0xFF66..=0xFF6F => Script::Katakana, 0xFF71..=0xFF9D => Script::Katakana, 0x1B000 => Script::Katakana, 0x02EA..=0x02EB => Script::Bopomofo, 0x3105..=0x312D => Script::Bopomofo, 0x31A0..=0x31BA => Script::Bopomofo, 0x2E80..=0x2E99 => Script::Han, 0x2E9B..=0x2EF3 => Script::Han, 0x2F00..=0x2FD5 => Script::Han, 0x3005 => Script::Han, 0x3007 => Script::Han, 0x3021..=0x3029 => Script::Han, 0x3038..=0x303A => Script::Han, 0x303B => Script::Han, 0x3400..=0x4DB5 => Script::Han, 0x4E00..=0x9FD5 => Script::Han, 0xF900..=0xFA6D => Script::Han, 0xFA70..=0xFAD9 => Script::Han, 0x20000..=0x2A6D6 => Script::Han, 0x2A700..=0x2B734 => Script::Han, 0x2B740..=0x2B81D => Script::Han, 0x2B820..=0x2CEA1 => Script::Han, 0x2F800..=0x2FA1D => Script::Han, 0xA000..=0xA014 => Script::Yi, 0xA015 => Script::Yi, 0xA016..=0xA48C => Script::Yi, 0xA490..=0xA4C6 => Script::Yi, 0x10300..=0x1031F => Script::OldItalic, 0x10320..=0x10323 => Script::OldItalic, 0x10330..=0x10340 => Script::Gothic, 0x10341 => Script::Gothic, 0x10342..=0x10349 => Script::Gothic, 0x1034A => Script::Gothic, 0x10400..=0x1044F => Script::Deseret, 0x0300..=0x036F => Script::Inherited, 0x0485..=0x0486 => Script::Inherited, 0x064B..=0x0655 => Script::Inherited, 0x0670 => Script::Inherited, 0x0951..=0x0952 => Script::Inherited, 0x1AB0..=0x1ABD => Script::Inherited, 0x1ABE => Script::Inherited, 0x1CD0..=0x1CD2 => Script::Inherited, 0x1CD4..=0x1CE0 => Script::Inherited, 0x1CE2..=0x1CE8 => Script::Inherited, 0x1CED => Script::Inherited, 0x1CF4 => Script::Inherited, 0x1CF8..=0x1CF9 => Script::Inherited, 0x1DC0..=0x1DF5 => Script::Inherited, 0x1DFB..=0x1DFF => Script::Inherited, 0x200C..=0x200D => Script::Inherited, 0x20D0..=0x20DC => Script::Inherited, 0x20DD..=0x20E0 => Script::Inherited, 0x20E1 => Script::Inherited, 0x20E2..=0x20E4 => Script::Inherited, 0x20E5..=0x20F0 => Script::Inherited, 0x302A..=0x302D => Script::Inherited, 0x3099..=0x309A => Script::Inherited, 0xFE00..=0xFE0F => Script::Inherited, 0xFE20..=0xFE2D => Script::Inherited, 0x101FD => Script::Inherited, 0x102E0 => Script::Inherited, 0x1D167..=0x1D169 => Script::Inherited, 0x1D17B..=0x1D182 => Script::Inherited, 0x1D185..=0x1D18B => Script::Inherited, 0x1D1AA..=0x1D1AD => Script::Inherited, 0xE0100..=0xE01EF => Script::Inherited, 0x1700..=0x170C => Script::Tagalog, 0x170E..=0x1711 => Script::Tagalog, 0x1712..=0x1714 => Script::Tagalog, 0x1720..=0x1731 => Script::Hanunoo, 0x1732..=0x1734 => Script::Hanunoo, 0x1740..=0x1751 => Script::Buhid, 0x1752..=0x1753 => Script::Buhid, 0x1760..=0x176C => Script::Tagbanwa, 0x176E..=0x1770 => Script::Tagbanwa, 0x1772..=0x1773 => Script::Tagbanwa, 0x1900..=0x191E => Script::Limbu, 0x1920..=0x1922 => Script::Limbu, 0x1923..=0x1926 => Script::Limbu, 0x1927..=0x1928 => Script::Limbu, 0x1929..=0x192B => Script::Limbu, 0x1930..=0x1931 => Script::Limbu, 0x1932 => Script::Limbu, 0x1933..=0x1938 => Script::Limbu, 0x1939..=0x193B => Script::Limbu, 0x1940 => Script::Limbu, 0x1944..=0x1945 => Script::Limbu, 0x1946..=0x194F => Script::Limbu, 0x1950..=0x196D => Script::TaiLe, 0x1970..=0x1974 => Script::TaiLe, 0x10000..=0x1000B => Script::LinearB, 0x1000D..=0x10026 => Script::LinearB, 0x10028..=0x1003A => Script::LinearB, 0x1003C..=0x1003D => Script::LinearB, 0x1003F..=0x1004D => Script::LinearB, 0x10050..=0x1005D => Script::LinearB, 0x10080..=0x100FA => Script::LinearB, 0x10380..=0x1039D => Script::Ugaritic, 0x1039F => Script::Ugaritic, 0x10450..=0x1047F => Script::Shavian, 0x10480..=0x1049D => Script::Osmanya, 
0x104A0..=0x104A9 => Script::Osmanya, 0x10800..=0x10805 => Script::Cypriot, 0x10808 => Script::Cypriot, 0x1080A..=0x10835 => Script::Cypriot, 0x10837..=0x10838 => Script::Cypriot, 0x1083C => Script::Cypriot, 0x1083F => Script::Cypriot, 0x2800..=0x28FF => Script::Braille, 0x1A00..=0x1A16 => Script::Buginese, 0x1A17..=0x1A18 => Script::Buginese, 0x1A19..=0x1A1A => Script::Buginese, 0x1A1B => Script::Buginese, 0x1A1E..=0x1A1F => Script::Buginese, 0x03E2..=0x03EF => Script::Coptic, 0x2C80..=0x2CE4 => Script::Coptic, 0x2CE5..=0x2CEA => Script::Coptic, 0x2CEB..=0x2CEE => Script::Coptic, 0x2CEF..=0x2CF1 => Script::Coptic, 0x2CF2..=0x2CF3 => Script::Coptic, 0x2CF9..=0x2CFC => Script::Coptic, 0x2CFD => Script::Coptic, 0x2CFE..=0x2CFF => Script::Coptic, 0x1980..=0x19AB => Script::NewTaiLue, 0x19B0..=0x19C9 => Script::NewTaiLue, 0x19D0..=0x19D9 => Script::NewTaiLue, 0x19DA => Script::NewTaiLue, 0x19DE..=0x19DF => Script::NewTaiLue, 0x2C00..=0x2C2E => Script::Glagolitic, 0x2C30..=0x2C5E => Script::Glagolitic, 0x1E000..=0x1E006 => Script::Glagolitic, 0x1E008..=0x1E018 => Script::Glagolitic, 0x1E01B..=0x1E021 => Script::Glagolitic, 0x1E023..=0x1E024 => Script::Glagolitic, 0x1E026..=0x1E02A => Script::Glagolitic, 0x2D30..=0x2D67 => Script::Tifinagh, 0x2D6F => Script::Tifinagh, 0x2D70 => Script::Tifinagh, 0x2D7F => Script::Tifinagh, 0xA800..=0xA801 => Script::SylotiNagri, 0xA802 => Script::SylotiNagri, 0xA803..=0xA805 => Script::SylotiNagri, 0xA806 => Script::SylotiNagri, 0xA807..=0xA80A => Script::SylotiNagri, 0xA80B => Script::SylotiNagri, 0xA80C..=0xA822 => Script::SylotiNagri, 0xA823..=0xA824 => Script::SylotiNagri, 0xA825..=0xA826 => Script::SylotiNagri, 0xA827 => Script::SylotiNagri, 0xA828..=0xA82B => Script::SylotiNagri, 0x103A0..=0x103C3 => Script::OldPersian, 0x103C8..=0x103CF => Script::OldPersian, 0x103D0 => Script::OldPersian, 0x103D1..=0x103D5 => Script::OldPersian, 0x10A00 => Script::Kharoshthi, 0x10A01..=0x10A03 => Script::Kharoshthi, 0x10A05..=0x10A06 => Script::Kharoshthi, 0x10A0C..=0x10A0F => Script::Kharoshthi, 0x10A10..=0x10A13 => Script::Kharoshthi, 0x10A15..=0x10A17 => Script::Kharoshthi, 0x10A19..=0x10A33 => Script::Kharoshthi, 0x10A38..=0x10A3A => Script::Kharoshthi, 0x10A3F => Script::Kharoshthi, 0x10A40..=0x10A47 => Script::Kharoshthi, 0x10A50..=0x10A58 => Script::Kharoshthi, 0x1B00..=0x1B03 => Script::Balinese, 0x1B04 => Script::Balinese, 0x1B05..=0x1B33 => Script::Balinese, 0x1B34 => Script::Balinese, 0x1B35 => Script::Balinese, 0x1B36..=0x1B3A => Script::Balinese, 0x1B3B => Script::Balinese, 0x1B3C => Script::Balinese, 0x1B3D..=0x1B41 => Script::Balinese, 0x1B42 => Script::Balinese, 0x1B43..=0x1B44 => Script::Balinese, 0x1B45..=0x1B4B => Script::Balinese, 0x1B50..=0x1B59 => Script::Balinese, 0x1B5A..=0x1B60 => Script::Balinese, 0x1B61..=0x1B6A => Script::Balinese, 0x1B6B..=0x1B73 => Script::Balinese, 0x1B74..=0x1B7C => Script::Balinese, 0x12000..=0x12399 => Script::Cuneiform, 0x12400..=0x1246E => Script::Cuneiform, 0x12470..=0x12474 => Script::Cuneiform, 0x12480..=0x12543 => Script::Cuneiform, 0x10900..=0x10915 => Script::Phoenician, 0x10916..=0x1091B => Script::Phoenician, 0x1091F => Script::Phoenician, 0xA840..=0xA873 => Script::PhagsPa, 0xA874..=0xA877 => Script::PhagsPa, 0x07C0..=0x07C9 => Script::Nko, 0x07CA..=0x07EA => Script::Nko, 0x07EB..=0x07F3 => Script::Nko, 0x07F4..=0x07F5 => Script::Nko, 0x07F6 => Script::Nko, 0x07F7..=0x07F9 => Script::Nko, 0x07FA => Script::Nko, 0x1B80..=0x1B81 => Script::Sundanese, 0x1B82 => Script::Sundanese, 0x1B83..=0x1BA0 => 
Script::Sundanese, 0x1BA1 => Script::Sundanese, 0x1BA2..=0x1BA5 => Script::Sundanese, 0x1BA6..=0x1BA7 => Script::Sundanese, 0x1BA8..=0x1BA9 => Script::Sundanese, 0x1BAA => Script::Sundanese, 0x1BAB..=0x1BAD => Script::Sundanese, 0x1BAE..=0x1BAF => Script::Sundanese, 0x1BB0..=0x1BB9 => Script::Sundanese, 0x1BBA..=0x1BBF => Script::Sundanese, 0x1CC0..=0x1CC7 => Script::Sundanese, 0x1C00..=0x1C23 => Script::Lepcha, 0x1C24..=0x1C2B => Script::Lepcha, 0x1C2C..=0x1C33 => Script::Lepcha, 0x1C34..=0x1C35 => Script::Lepcha, 0x1C36..=0x1C37 => Script::Lepcha, 0x1C3B..=0x1C3F => Script::Lepcha, 0x1C40..=0x1C49 => Script::Lepcha, 0x1C4D..=0x1C4F => Script::Lepcha, 0x1C50..=0x1C59 => Script::OlChiki, 0x1C5A..=0x1C77 => Script::OlChiki, 0x1C78..=0x1C7D => Script::OlChiki, 0x1C7E..=0x1C7F => Script::OlChiki, 0xA500..=0xA60B => Script::Vai, 0xA60C => Script::Vai, 0xA60D..=0xA60F => Script::Vai, 0xA610..=0xA61F => Script::Vai, 0xA620..=0xA629 => Script::Vai, 0xA62A..=0xA62B => Script::Vai, 0xA880..=0xA881 => Script::Saurashtra, 0xA882..=0xA8B3 => Script::Saurashtra, 0xA8B4..=0xA8C3 => Script::Saurashtra, 0xA8C4..=0xA8C5 => Script::Saurashtra, 0xA8CE..=0xA8CF => Script::Saurashtra, 0xA8D0..=0xA8D9 => Script::Saurashtra, 0xA900..=0xA909 => Script::KayahLi, 0xA90A..=0xA925 => Script::KayahLi, 0xA926..=0xA92D => Script::KayahLi, 0xA92F => Script::KayahLi, 0xA930..=0xA946 => Script::Rejang, 0xA947..=0xA951 => Script::Rejang, 0xA952..=0xA953 => Script::Rejang, 0xA95F => Script::Rejang, 0x10280..=0x1029C => Script::Lycian, 0x102A0..=0x102D0 => Script::Carian, 0x10920..=0x10939 => Script::Lydian, 0x1093F => Script::Lydian, 0xAA00..=0xAA28 => Script::Cham, 0xAA29..=0xAA2E => Script::Cham, 0xAA2F..=0xAA30 => Script::Cham, 0xAA31..=0xAA32 => Script::Cham, 0xAA33..=0xAA34 => Script::Cham, 0xAA35..=0xAA36 => Script::Cham, 0xAA40..=0xAA42 => Script::Cham, 0xAA43 => Script::Cham, 0xAA44..=0xAA4B => Script::Cham, 0xAA4C => Script::Cham, 0xAA4D => Script::Cham, 0xAA50..=0xAA59 => Script::Cham, 0xAA5C..=0xAA5F => Script::Cham, 0x1A20..=0x1A54 => Script::TaiTham, 0x1A55 => Script::TaiTham, 0x1A56 => Script::TaiTham, 0x1A57 => Script::TaiTham, 0x1A58..=0x1A5E => Script::TaiTham, 0x1A60 => Script::TaiTham, 0x1A61 => Script::TaiTham, 0x1A62 => Script::TaiTham, 0x1A63..=0x1A64 => Script::TaiTham, 0x1A65..=0x1A6C => Script::TaiTham, 0x1A6D..=0x1A72 => Script::TaiTham, 0x1A73..=0x1A7C => Script::TaiTham, 0x1A7F => Script::TaiTham, 0x1A80..=0x1A89 => Script::TaiTham, 0x1A90..=0x1A99 => Script::TaiTham, 0x1AA0..=0x1AA6 => Script::TaiTham, 0x1AA7 => Script::TaiTham, 0x1AA8..=0x1AAD => Script::TaiTham, 0xAA80..=0xAAAF => Script::TaiViet, 0xAAB0 => Script::TaiViet, 0xAAB1 => Script::TaiViet, 0xAAB2..=0xAAB4 => Script::TaiViet, 0xAAB5..=0xAAB6 => Script::TaiViet, 0xAAB7..=0xAAB8 => Script::TaiViet, 0xAAB9..=0xAABD => Script::TaiViet, 0xAABE..=0xAABF => Script::TaiViet, 0xAAC0 => Script::TaiViet, 0xAAC1 => Script::TaiViet, 0xAAC2 => Script::TaiViet, 0xAADB..=0xAADC => Script::TaiViet, 0xAADD => Script::TaiViet, 0xAADE..=0xAADF => Script::TaiViet, 0x10B00..=0x10B35 => Script::Avestan, 0x10B39..=0x10B3F => Script::Avestan, 0x13000..=0x1342E => Script::EgyptianHieroglyphs, 0x0800..=0x0815 => Script::Samaritan, 0x0816..=0x0819 => Script::Samaritan, 0x081A => Script::Samaritan, 0x081B..=0x0823 => Script::Samaritan, 0x0824 => Script::Samaritan, 0x0825..=0x0827 => Script::Samaritan, 0x0828 => Script::Samaritan, 0x0829..=0x082D => Script::Samaritan, 0x0830..=0x083E => Script::Samaritan, 0xA4D0..=0xA4F7 => Script::Lisu, 0xA4F8..=0xA4FD => 
Script::Lisu, 0xA4FE..=0xA4FF => Script::Lisu, 0xA6A0..=0xA6E5 => Script::Bamum, 0xA6E6..=0xA6EF => Script::Bamum, 0xA6F0..=0xA6F1 => Script::Bamum, 0xA6F2..=0xA6F7 => Script::Bamum, 0x16800..=0x16A38 => Script::Bamum, 0xA980..=0xA982 => Script::Javanese, 0xA983 => Script::Javanese, 0xA984..=0xA9B2 => Script::Javanese, 0xA9B3 => Script::Javanese, 0xA9B4..=0xA9B5 => Script::Javanese, 0xA9B6..=0xA9B9 => Script::Javanese, 0xA9BA..=0xA9BB => Script::Javanese, 0xA9BC => Script::Javanese, 0xA9BD..=0xA9C0 => Script::Javanese, 0xA9C1..=0xA9CD => Script::Javanese, 0xA9D0..=0xA9D9 => Script::Javanese, 0xA9DE..=0xA9DF => Script::Javanese, 0xAAE0..=0xAAEA => Script::MeeteiMayek, 0xAAEB => Script::MeeteiMayek, 0xAAEC..=0xAAED => Script::MeeteiMayek, 0xAAEE..=0xAAEF => Script::MeeteiMayek, 0xAAF0..=0xAAF1 => Script::MeeteiMayek, 0xAAF2 => Script::MeeteiMayek, 0xAAF3..=0xAAF4 => Script::MeeteiMayek, 0xAAF5 => Script::MeeteiMayek, 0xAAF6 => Script::MeeteiMayek, 0xABC0..=0xABE2 => Script::MeeteiMayek, 0xABE3..=0xABE4 => Script::MeeteiMayek, 0xABE5 => Script::MeeteiMayek, 0xABE6..=0xABE7 => Script::MeeteiMayek, 0xABE8 => Script::MeeteiMayek, 0xABE9..=0xABEA => Script::MeeteiMayek, 0xABEB => Script::MeeteiMayek, 0xABEC => Script::MeeteiMayek, 0xABED => Script::MeeteiMayek, 0xABF0..=0xABF9 => Script::MeeteiMayek, 0x10840..=0x10855 => Script::ImperialAramaic, 0x10857 => Script::ImperialAramaic, 0x10858..=0x1085F => Script::ImperialAramaic, 0x10A60..=0x10A7C => Script::OldSouthArabian, 0x10A7D..=0x10A7E => Script::OldSouthArabian, 0x10A7F => Script::OldSouthArabian, 0x10B40..=0x10B55 => Script::InscriptionalParthian, 0x10B58..=0x10B5F => Script::InscriptionalParthian, 0x10B60..=0x10B72 => Script::InscriptionalPahlavi, 0x10B78..=0x10B7F => Script::InscriptionalPahlavi, 0x10C00..=0x10C48 => Script::OldTurkic, 0x11080..=0x11081 => Script::Kaithi, 0x11082 => Script::Kaithi, 0x11083..=0x110AF => Script::Kaithi, 0x110B0..=0x110B2 => Script::Kaithi, 0x110B3..=0x110B6 => Script::Kaithi, 0x110B7..=0x110B8 => Script::Kaithi, 0x110B9..=0x110BA => Script::Kaithi, 0x110BB..=0x110BC => Script::Kaithi, 0x110BD => Script::Kaithi, 0x110BE..=0x110C1 => Script::Kaithi, 0x1BC0..=0x1BE5 => Script::Batak, 0x1BE6 => Script::Batak, 0x1BE7 => Script::Batak, 0x1BE8..=0x1BE9 => Script::Batak, 0x1BEA..=0x1BEC => Script::Batak, 0x1BED => Script::Batak, 0x1BEE => Script::Batak, 0x1BEF..=0x1BF1 => Script::Batak, 0x1BF2..=0x1BF3 => Script::Batak, 0x1BFC..=0x1BFF => Script::Batak, 0x11000 => Script::Brahmi, 0x11001 => Script::Brahmi, 0x11002 => Script::Brahmi, 0x11003..=0x11037 => Script::Brahmi, 0x11038..=0x11046 => Script::Brahmi, 0x11047..=0x1104D => Script::Brahmi, 0x11052..=0x11065 => Script::Brahmi, 0x11066..=0x1106F => Script::Brahmi, 0x1107F => Script::Brahmi, 0x0840..=0x0858 => Script::Mandaic, 0x0859..=0x085B => Script::Mandaic, 0x085E => Script::Mandaic, 0x11100..=0x11102 => Script::Chakma, 0x11103..=0x11126 => Script::Chakma, 0x11127..=0x1112B => Script::Chakma, 0x1112C => Script::Chakma, 0x1112D..=0x11134 => Script::Chakma, 0x11136..=0x1113F => Script::Chakma, 0x11140..=0x11143 => Script::Chakma, 0x109A0..=0x109B7 => Script::MeroiticCursive, 0x109BC..=0x109BD => Script::MeroiticCursive, 0x109BE..=0x109BF => Script::MeroiticCursive, 0x109C0..=0x109CF => Script::MeroiticCursive, 0x109D2..=0x109FF => Script::MeroiticCursive, 0x10980..=0x1099F => Script::MeroiticHieroglyphs, 0x16F00..=0x16F44 => Script::Miao, 0x16F50 => Script::Miao, 0x16F51..=0x16F7E => Script::Miao, 0x16F8F..=0x16F92 => Script::Miao, 0x16F93..=0x16F9F => 
Script::Miao, 0x11180..=0x11181 => Script::Sharada, 0x11182 => Script::Sharada, 0x11183..=0x111B2 => Script::Sharada, 0x111B3..=0x111B5 => Script::Sharada, 0x111B6..=0x111BE => Script::Sharada, 0x111BF..=0x111C0 => Script::Sharada, 0x111C1..=0x111C4 => Script::Sharada, 0x111C5..=0x111C9 => Script::Sharada, 0x111CA..=0x111CC => Script::Sharada, 0x111CD => Script::Sharada, 0x111D0..=0x111D9 => Script::Sharada, 0x111DA => Script::Sharada, 0x111DB => Script::Sharada, 0x111DC => Script::Sharada, 0x111DD..=0x111DF => Script::Sharada, 0x110D0..=0x110E8 => Script::SoraSompeng, 0x110F0..=0x110F9 => Script::SoraSompeng, 0x11680..=0x116AA => Script::Takri, 0x116AB => Script::Takri, 0x116AC => Script::Takri, 0x116AD => Script::Takri, 0x116AE..=0x116AF => Script::Takri, 0x116B0..=0x116B5 => Script::Takri, 0x116B6 => Script::Takri, 0x116B7 => Script::Takri, 0x116C0..=0x116C9 => Script::Takri, 0x10530..=0x10563 => Script::CaucasianAlbanian, 0x1056F => Script::CaucasianAlbanian, 0x16AD0..=0x16AED => Script::BassaVah, 0x16AF0..=0x16AF4 => Script::BassaVah, 0x16AF5 => Script::BassaVah, 0x1BC00..=0x1BC6A => Script::Duployan, 0x1BC70..=0x1BC7C => Script::Duployan, 0x1BC80..=0x1BC88 => Script::Duployan, 0x1BC90..=0x1BC99 => Script::Duployan, 0x1BC9C => Script::Duployan, 0x1BC9D..=0x1BC9E => Script::Duployan, 0x1BC9F => Script::Duployan, 0x10500..=0x10527 => Script::Elbasan, 0x11300..=0x11301 => Script::Grantha, 0x11302..=0x11303 => Script::Grantha, 0x11305..=0x1130C => Script::Grantha, 0x1130F..=0x11310 => Script::Grantha, 0x11313..=0x11328 => Script::Grantha, 0x1132A..=0x11330 => Script::Grantha, 0x11332..=0x11333 => Script::Grantha, 0x11335..=0x11339 => Script::Grantha, 0x1133C => Script::Grantha, 0x1133D => Script::Grantha, 0x1133E..=0x1133F => Script::Grantha, 0x11340 => Script::Grantha, 0x11341..=0x11344 => Script::Grantha, 0x11347..=0x11348 => Script::Grantha, 0x1134B..=0x1134D => Script::Grantha, 0x11350 => Script::Grantha, 0x11357 => Script::Grantha, 0x1135D..=0x11361 => Script::Grantha, 0x11362..=0x11363 => Script::Grantha, 0x11366..=0x1136C => Script::Grantha, 0x11370..=0x11374 => Script::Grantha, 0x16B00..=0x16B2F => Script::PahawhHmong, 0x16B30..=0x16B36 => Script::PahawhHmong, 0x16B37..=0x16B3B => Script::PahawhHmong, 0x16B3C..=0x16B3F => Script::PahawhHmong, 0x16B40..=0x16B43 => Script::PahawhHmong, 0x16B44 => Script::PahawhHmong, 0x16B45 => Script::PahawhHmong, 0x16B50..=0x16B59 => Script::PahawhHmong, 0x16B5B..=0x16B61 => Script::PahawhHmong, 0x16B63..=0x16B77 => Script::PahawhHmong, 0x16B7D..=0x16B8F => Script::PahawhHmong, 0x11200..=0x11211 => Script::Khojki, 0x11213..=0x1122B => Script::Khojki, 0x1122C..=0x1122E => Script::Khojki, 0x1122F..=0x11231 => Script::Khojki, 0x11232..=0x11233 => Script::Khojki, 0x11234 => Script::Khojki, 0x11235 => Script::Khojki, 0x11236..=0x11237 => Script::Khojki, 0x11238..=0x1123D => Script::Khojki, 0x1123E => Script::Khojki, 0x10600..=0x10736 => Script::LinearA, 0x10740..=0x10755 => Script::LinearA, 0x10760..=0x10767 => Script::LinearA, 0x11150..=0x11172 => Script::Mahajani, 0x11173 => Script::Mahajani, 0x11174..=0x11175 => Script::Mahajani, 0x11176 => Script::Mahajani, 0x10AC0..=0x10AC7 => Script::Manichaean, 0x10AC8 => Script::Manichaean, 0x10AC9..=0x10AE4 => Script::Manichaean, 0x10AE5..=0x10AE6 => Script::Manichaean, 0x10AEB..=0x10AEF => Script::Manichaean, 0x10AF0..=0x10AF6 => Script::Manichaean, 0x1E800..=0x1E8C4 => Script::MendeKikakui, 0x1E8C7..=0x1E8CF => Script::MendeKikakui, 0x1E8D0..=0x1E8D6 => Script::MendeKikakui, 0x11600..=0x1162F => 
Script::Modi, 0x11630..=0x11632 => Script::Modi, 0x11633..=0x1163A => Script::Modi, 0x1163B..=0x1163C => Script::Modi, 0x1163D => Script::Modi, 0x1163E => Script::Modi, 0x1163F..=0x11640 => Script::Modi, 0x11641..=0x11643 => Script::Modi, 0x11644 => Script::Modi, 0x11650..=0x11659 => Script::Modi, 0x16A40..=0x16A5E => Script::Mro, 0x16A60..=0x16A69 => Script::Mro, 0x16A6E..=0x16A6F => Script::Mro, 0x10A80..=0x10A9C => Script::OldNorthArabian, 0x10A9D..=0x10A9F => Script::OldNorthArabian, 0x10880..=0x1089E => Script::Nabataean, 0x108A7..=0x108AF => Script::Nabataean, 0x10860..=0x10876 => Script::Palmyrene, 0x10877..=0x10878 => Script::Palmyrene, 0x10879..=0x1087F => Script::Palmyrene, 0x11AC0..=0x11AF8 => Script::PauCinHau, 0x10350..=0x10375 => Script::OldPermic, 0x10376..=0x1037A => Script::OldPermic, 0x10B80..=0x10B91 => Script::PsalterPahlavi, 0x10B99..=0x10B9C => Script::PsalterPahlavi, 0x10BA9..=0x10BAF => Script::PsalterPahlavi, 0x11580..=0x115AE => Script::Siddham, 0x115AF..=0x115B1 => Script::Siddham, 0x115B2..=0x115B5 => Script::Siddham, 0x115B8..=0x115BB => Script::Siddham, 0x115BC..=0x115BD => Script::Siddham, 0x115BE => Script::Siddham, 0x115BF..=0x115C0 => Script::Siddham, 0x115C1..=0x115D7 => Script::Siddham, 0x115D8..=0x115DB => Script::Siddham, 0x115DC..=0x115DD => Script::Siddham, 0x112B0..=0x112DE => Script::Khudawadi, 0x112DF => Script::Khudawadi, 0x112E0..=0x112E2 => Script::Khudawadi, 0x112E3..=0x112EA => Script::Khudawadi, 0x112F0..=0x112F9 => Script::Khudawadi, 0x11480..=0x114AF => Script::Tirhuta, 0x114B0..=0x114B2 => Script::Tirhuta, 0x114B3..=0x114B8 => Script::Tirhuta, 0x114B9 => Script::Tirhuta, 0x114BA => Script::Tirhuta, 0x114BB..=0x114BE => Script::Tirhuta, 0x114BF..=0x114C0 => Script::Tirhuta, 0x114C1 => Script::Tirhuta, 0x114C2..=0x114C3 => Script::Tirhuta, 0x114C4..=0x114C5 => Script::Tirhuta, 0x114C6 => Script::Tirhuta, 0x114C7 => Script::Tirhuta, 0x114D0..=0x114D9 => Script::Tirhuta, 0x118A0..=0x118DF => Script::WarangCiti, 0x118E0..=0x118E9 => Script::WarangCiti, 0x118EA..=0x118F2 => Script::WarangCiti, 0x118FF => Script::WarangCiti, 0x11700..=0x11719 => Script::Ahom, 0x1171D..=0x1171F => Script::Ahom, 0x11720..=0x11721 => Script::Ahom, 0x11722..=0x11725 => Script::Ahom, 0x11726 => Script::Ahom, 0x11727..=0x1172B => Script::Ahom, 0x11730..=0x11739 => Script::Ahom, 0x1173A..=0x1173B => Script::Ahom, 0x1173C..=0x1173E => Script::Ahom, 0x1173F => Script::Ahom, 0x14400..=0x14646 => Script::AnatolianHieroglyphs, 0x108E0..=0x108F2 => Script::Hatran, 0x108F4..=0x108F5 => Script::Hatran, 0x108FB..=0x108FF => Script::Hatran, 0x11280..=0x11286 => Script::Multani, 0x11288 => Script::Multani, 0x1128A..=0x1128D => Script::Multani, 0x1128F..=0x1129D => Script::Multani, 0x1129F..=0x112A8 => Script::Multani, 0x112A9 => Script::Multani, 0x10C80..=0x10CB2 => Script::OldHungarian, 0x10CC0..=0x10CF2 => Script::OldHungarian, 0x10CFA..=0x10CFF => Script::OldHungarian, 0x1D800..=0x1D9FF => Script::SignWriting, 0x1DA00..=0x1DA36 => Script::SignWriting, 0x1DA37..=0x1DA3A => Script::SignWriting, 0x1DA3B..=0x1DA6C => Script::SignWriting, 0x1DA6D..=0x1DA74 => Script::SignWriting, 0x1DA75 => Script::SignWriting, 0x1DA76..=0x1DA83 => Script::SignWriting, 0x1DA84 => Script::SignWriting, 0x1DA85..=0x1DA86 => Script::SignWriting, 0x1DA87..=0x1DA8B => Script::SignWriting, 0x1DA9B..=0x1DA9F => Script::SignWriting, 0x1DAA1..=0x1DAAF => Script::SignWriting, 0x1E900..=0x1E943 => Script::Adlam, 0x1E944..=0x1E94A => Script::Adlam, 0x1E950..=0x1E959 => Script::Adlam, 0x1E95E..=0x1E95F => 
Script::Adlam, 0x11C00..=0x11C08 => Script::Bhaiksuki, 0x11C0A..=0x11C2E => Script::Bhaiksuki, 0x11C2F => Script::Bhaiksuki, 0x11C30..=0x11C36 => Script::Bhaiksuki, 0x11C38..=0x11C3D => Script::Bhaiksuki, 0x11C3E => Script::Bhaiksuki, 0x11C3F => Script::Bhaiksuki, 0x11C40 => Script::Bhaiksuki, 0x11C41..=0x11C45 => Script::Bhaiksuki, 0x11C50..=0x11C59 => Script::Bhaiksuki, 0x11C5A..=0x11C6C => Script::Bhaiksuki, 0x11C70..=0x11C71 => Script::Marchen, 0x11C72..=0x11C8F => Script::Marchen, 0x11C92..=0x11CA7 => Script::Marchen, 0x11CA9 => Script::Marchen, 0x11CAA..=0x11CB0 => Script::Marchen, 0x11CB1 => Script::Marchen, 0x11CB2..=0x11CB3 => Script::Marchen, 0x11CB4 => Script::Marchen, 0x11CB5..=0x11CB6 => Script::Marchen, 0x11400..=0x11434 => Script::Newa, 0x11435..=0x11437 => Script::Newa, 0x11438..=0x1143F => Script::Newa, 0x11440..=0x11441 => Script::Newa, 0x11442..=0x11444 => Script::Newa, 0x11445 => Script::Newa, 0x11446 => Script::Newa, 0x11447..=0x1144A => Script::Newa, 0x1144B..=0x1144F => Script::Newa, 0x11450..=0x11459 => Script::Newa, 0x1145B => Script::Newa, 0x1145D => Script::Newa, 0x104B0..=0x104D3 => Script::Osage, 0x104D8..=0x104FB => Script::Osage, 0x16FE0 => Script::Tangut, 0x17000..=0x187EC => Script::Tangut, 0x18800..=0x18AF2 => Script::Tangut, _ => Script::Any, } } #[cfg(test)] mod tests { use super::*; #[test] fn test_unicode_script() { assert_eq!(Script::Han, get_script('京')); assert_eq!(Script::Han, get_script('太')); assert_eq!(Script::Hiragana, get_script('い')); assert_eq!(Script::Katakana, get_script('グ')); assert_eq!(Script::Common, get_script('ー')); assert_eq!(Script::Latin, get_script('a')); assert_eq!(Script::Latin, get_script('A')); assert_eq!(Script::Common, get_script('0')); assert_eq!(Script::Common, get_script('$')); assert_eq!(Script::Common, get_script('@')); assert_eq!(Script::Common, get_script('-')); assert_eq!(Script::Common, get_script(' ')); assert_eq!(Script::Common, get_script('�')); } }
0
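A per-character lookup like the long `get_script` match above typically feeds a pre-tokenization step that splits text wherever the detected script changes, so that, for example, Latin and Han characters never end up in the same pre-token. The sketch below only illustrates that grouping idea; `tiny_get_script` and `split_by_script` are hypothetical stand-ins covering a handful of ranges, not part of the file above.

```rust
// Illustrative sketch only: `tiny_get_script` is a deliberately truncated
// stand-in for a full per-character script table.
#[derive(Debug, PartialEq, Clone, Copy)]
enum Script {
    Latin,
    Han,
    Hiragana,
    Any,
}

fn tiny_get_script(c: char) -> Script {
    match c as u32 {
        0x0041..=0x005A | 0x0061..=0x007A => Script::Latin,
        0x4E00..=0x9FD5 => Script::Han,
        0x3041..=0x3096 => Script::Hiragana,
        _ => Script::Any,
    }
}

/// Group consecutive characters that share the same script into runs.
fn split_by_script(s: &str) -> Vec<(Script, String)> {
    let mut runs: Vec<(Script, String)> = Vec::new();
    for c in s.chars() {
        let script = tiny_get_script(c);
        // Extend the current run only while the script stays the same.
        let extend = matches!(runs.last(), Some((prev, _)) if *prev == script);
        if extend {
            runs.last_mut().unwrap().1.push(c);
        } else {
            runs.push((script, c.to_string()));
        }
    }
    runs
}

fn main() {
    // "abc" is one Latin run, "東京" one Han run, "!" falls through to Any.
    for (script, run) in split_by_script("abc東京!") {
        println!("{:?}: {}", script, run);
    }
}
```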
hf_public_repos/tokenizers/tokenizers
hf_public_repos/tokenizers/tokenizers/benches/unigram_benchmark.rs
#[macro_use]
extern crate criterion;

use criterion::Criterion;
use std::collections::HashMap;
use std::fs::read_to_string;
use std::time::{Duration, Instant};
use tokenizers::models::unigram::Unigram;
use tokenizers::models::unigram::UnigramTrainer;

pub fn bench_train(c: &mut Criterion) {
    let trainer = UnigramTrainer::builder()
        .show_progress(false)
        .unk_token(Some("<UNK>".into()))
        .build()
        .unwrap();
    let mut model = Unigram::default();

    let content = read_to_string("data/small.txt").unwrap();
    let mut word_counts = HashMap::new();
    content.split_whitespace().for_each(|word| {
        // This is important for the test of char vs u8
        let word = format!("▁{}", word);
        *word_counts.entry(word).or_insert(0) += 1;
    });

    let sentences: Vec<_> = word_counts
        .iter()
        .map(|(s, i)| (s.to_owned(), *i))
        .collect();

    c.bench_function("Unigram Train vocabulary (small)", |b| {
        b.iter_custom(|iters| {
            let mut duration = Duration::new(0, 0);
            for _i in 0..iters {
                let sentences = sentences.clone();
                let start = Instant::now();
                trainer.do_train(sentences, &mut model).unwrap();
                duration = duration.checked_add(start.elapsed()).unwrap();
            }
            duration
        })
    });

    let content = read_to_string("data/big.txt").unwrap();
    // creating `medium` data, which is the first 25% of `data/big.txt`
    let content = String::from(&content[..(content.len() as f64 * 0.25) as usize]);
    let mut word_counts = HashMap::new();
    content.split_whitespace().for_each(|word| {
        // This is important for the test of char vs u8
        let word = format!("▁{}", word);
        *word_counts.entry(word).or_insert(0) += 1;
    });

    let sentences: Vec<_> = word_counts
        .iter()
        .map(|(s, i)| (s.to_owned(), *i))
        .collect();

    c.bench_function("Unigram Train vocabulary (medium)", |b| {
        b.iter_custom(|iters| {
            let mut duration = Duration::new(0, 0);
            for _i in 0..iters {
                let sentences = sentences.clone();
                let start = Instant::now();
                trainer.do_train(sentences, &mut model).unwrap();
                duration = duration.checked_add(start.elapsed()).unwrap();
            }
            duration
        })
    });
}

criterion_group! {
    name = benches_train;
    config = Criterion::default().sample_size(10);
    targets = bench_train
}

criterion_main!(benches_train);
0
hf_public_repos/tokenizers/tokenizers
hf_public_repos/tokenizers/tokenizers/benches/layout_benchmark.rs
#[macro_use]
extern crate criterion;

use std::fs::File;
use std::io::{BufRead, BufReader};
use std::path::Path;
use std::time::{Duration, Instant};

use criterion::black_box;
use criterion::Criterion;
use tokenizers::processors::template::TemplateProcessing;
use tokenizers::{EncodeInput, Encoding, PostProcessor, Tokenizer};

/// Simple TemplateProcessing
fn create_processor() -> TemplateProcessing {
    TemplateProcessing::builder()
        .try_single("[CLS]:0 $A:0 [SEP]:0")
        .unwrap()
        .try_pair("[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1")
        .unwrap()
        .special_tokens(vec![("[CLS]", 0), ("[SEP]", 1)])
        .build()
        .unwrap()
}

pub fn bench_layout(c: &mut Criterion) {
    let processor = create_processor();
    let tokenizer = Tokenizer::from_file("data/albert-base-v1-tokenizer.json").unwrap();
    let mut encodeds: Vec<Encoding> = vec![];
    for line in BufReader::new(File::open(Path::new("data/big.txt")).unwrap()).lines() {
        let line: EncodeInput = line.unwrap().into();
        let encoded: Encoding = tokenizer.encode(line, false).unwrap();
        encodeds.push(encoded);
    }

    c.bench_function("TemplateProcessing single encode", |b| {
        b.iter_custom(|iters| {
            let mut duration = Duration::new(0, 0);
            for i in 0..iters as usize {
                let encoded_index = i % encodeds.len();
                let encoded: Encoding = encodeds[encoded_index].clone();
                let start = Instant::now();
                let _ = black_box(processor.process(encoded, None, false));
                duration = duration.checked_add(start.elapsed()).unwrap();
            }
            duration
        })
    });

    c.bench_function("TemplateProcessing pair encode", |b| {
        b.iter_custom(|iters| {
            let mut duration = Duration::new(0, 0);
            for i in 0..iters as usize {
                let encoded_index = i % encodeds.len();
                let encoded: Encoding = encodeds[encoded_index].clone();
                let encoded_index2 = (i + 1) % encodeds.len();
                let pair: Encoding = encodeds[encoded_index2].clone();
                let start = Instant::now();
                let _ = black_box(processor.process(encoded, Some(pair), false));
                duration = duration.checked_add(start.elapsed()).unwrap();
            }
            duration
        })
    });
}

criterion_group! {
    name = layout_benches;
    config = Criterion::default().sample_size(20);
    targets = bench_layout
}

criterion_main!(layout_benches);
0
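The template strings passed to `TemplateProcessing::builder()` in the benchmark above have a small syntax of their own: `$A` and `$B` stand for the first and second input sequence, literal pieces such as `[CLS]` and `[SEP]` must also be listed in `special_tokens` together with their vocabulary ids, and the `:0` / `:1` suffixes set the type ids written into the processed encoding. A minimal annotated sketch, assuming the same special-token ids as the benchmark:

```rust
use tokenizers::processors::template::TemplateProcessing;

// Sketch only, not part of the benchmark file: the ids 0 and 1 for "[CLS]"
// and "[SEP]" are assumed to match the tokenizer's vocabulary.
fn annotated_processor() -> TemplateProcessing {
    TemplateProcessing::builder()
        // Single input: "[CLS] <sequence A> [SEP]", every piece gets type id 0.
        .try_single("[CLS]:0 $A:0 [SEP]:0")
        .unwrap()
        // Pair input: sequence B and its trailing [SEP] get type id 1 instead.
        .try_pair("[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1")
        .unwrap()
        // Special tokens referenced in the templates, with their vocabulary ids.
        .special_tokens(vec![("[CLS]", 0), ("[SEP]", 1)])
        .build()
        .unwrap()
}
```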
hf_public_repos/tokenizers/tokenizers
hf_public_repos/tokenizers/tokenizers/benches/bpe_benchmark.rs
#[macro_use]
extern crate criterion;

mod common;

use std::fs::File;
use std::io::{BufRead, BufReader};
use std::path::Path;

use criterion::Criterion;
use tokenizers::models::bpe::{BpeTrainerBuilder, BPE};
use tokenizers::models::TrainerWrapper;
use tokenizers::pre_tokenizers::byte_level::ByteLevel;
use tokenizers::pre_tokenizers::whitespace::Whitespace;
use tokenizers::tokenizer::{AddedToken, EncodeInput};
use tokenizers::Tokenizer;

use common::{iter_bench_encode, iter_bench_encode_batch, iter_bench_train};
use std::ops::Deref;

static BATCH_SIZE: usize = 1_000;

fn create_gpt2_tokenizer(bpe: BPE) -> Tokenizer {
    let mut tokenizer = Tokenizer::new(bpe);
    tokenizer.with_pre_tokenizer(ByteLevel::default());
    tokenizer.with_decoder(ByteLevel::default());
    tokenizer.add_tokens(&[AddedToken::from("ing", false).single_word(false)]);
    tokenizer.add_special_tokens(&[AddedToken::from("[ENT]", true).single_word(true)]);
    tokenizer
}

fn bench_gpt2(c: &mut Criterion) {
    let bpe = BPE::from_file("data/gpt2-vocab.json", "data/gpt2-merges.txt")
        .build()
        .unwrap();
    let tokenizer = create_gpt2_tokenizer(bpe);
    let mut lines: Vec<EncodeInput> = vec![];
    let mut batches: Vec<Vec<EncodeInput>> = vec![vec![]];
    for line in BufReader::new(File::open(Path::new("data/big.txt")).unwrap()).lines() {
        let line: EncodeInput = line.unwrap().into();
        lines.push(line.clone());
        if batches.last().unwrap().len() >= BATCH_SIZE {
            batches.push(vec![]);
        }
        batches.last_mut().unwrap().push(line);
    }

    c.bench_function("BPE GPT2 encode", |b| {
        b.iter_custom(|iters| iter_bench_encode(iters, tokenizer.deref(), &lines))
    });

    c.bench_function("BPE GPT2 encode batch", |b| {
        b.iter_custom(|iters| iter_bench_encode_batch(iters, tokenizer.deref(), &batches))
    });

    let bpe = BPE::from_file("data/gpt2-vocab.json", "data/gpt2-merges.txt")
        .cache_capacity(0)
        .build()
        .unwrap();
    let tokenizer = create_gpt2_tokenizer(bpe);

    c.bench_function("BPE GPT2 encode, no cache", |b| {
        b.iter_custom(|iters| iter_bench_encode(iters, &tokenizer, &lines))
    });

    c.bench_function("BPE GPT2 encode batch, no cache", |b| {
        b.iter_custom(|iters| iter_bench_encode_batch(iters, &tokenizer, &batches))
    });
}

fn bench_train(c: &mut Criterion) {
    let mut trainer: TrainerWrapper = BpeTrainerBuilder::default()
        .show_progress(false)
        .build()
        .into();
    let mut tokenizer = Tokenizer::new(BPE::default()).into_inner();
    tokenizer.with_pre_tokenizer(Whitespace {});
    c.bench_function("BPE Train vocabulary (small)", |b| {
        b.iter_custom(|iters| {
            iter_bench_train(
                iters,
                &mut tokenizer,
                &mut trainer,
                vec!["data/small.txt".to_string()],
            )
        })
    });

    let mut tokenizer = Tokenizer::new(BPE::default()).into_inner();
    tokenizer.with_pre_tokenizer(Whitespace {});
    c.bench_function("BPE Train vocabulary (big)", |b| {
        b.iter_custom(|iters| {
            iter_bench_train(
                iters,
                &mut tokenizer,
                &mut trainer,
                vec!["data/big.txt".to_string()],
            )
        })
    });
}

criterion_group! {
    name = benches;
    config = Criterion::default().sample_size(20);
    targets = bench_gpt2
}

criterion_group! {
    name = benches_train;
    config = Criterion::default().sample_size(10);
    targets = bench_train
}

criterion_main!(benches, benches_train);
0
hf_public_repos/tokenizers/tokenizers
hf_public_repos/tokenizers/tokenizers/benches/bert_benchmark.rs
#[macro_use]
extern crate criterion;

mod common;

use std::fs::File;
use std::io::{BufRead, BufReader};
use std::path::Path;

use criterion::Criterion;
use tokenizers::models::wordpiece::{WordPiece, WordPieceTrainerBuilder};
use tokenizers::normalizers::{BertNormalizer, NormalizerWrapper};
use tokenizers::pre_tokenizers::bert::BertPreTokenizer;
use tokenizers::processors::bert::BertProcessing;
use tokenizers::{decoders, EncodeInput, Model, TokenizerImpl};

use common::{iter_bench_encode, iter_bench_encode_batch, iter_bench_train};
use tokenizers::decoders::DecoderWrapper;
use tokenizers::pre_tokenizers::whitespace::Whitespace;
use tokenizers::processors::PostProcessorWrapper;

static BATCH_SIZE: usize = 1_000;

type BertTokenizer = TokenizerImpl<
    WordPiece,
    BertNormalizer,
    BertPreTokenizer,
    BertProcessing,
    decoders::wordpiece::WordPiece,
>;

/// Resembling the BertTokenizer implementation from the Python bindings.
fn create_bert_tokenizer(wp: WordPiece) -> BertTokenizer {
    let sep_id = *wp.get_vocab().get("[SEP]").unwrap();
    let cls_id = *wp.get_vocab().get("[CLS]").unwrap();
    let mut tokenizer = TokenizerImpl::new(wp);
    tokenizer.with_pre_tokenizer(BertPreTokenizer);
    tokenizer.with_normalizer(BertNormalizer::default());
    tokenizer.with_decoder(decoders::wordpiece::WordPiece::default());
    tokenizer.with_post_processor(BertProcessing::new(
        ("[SEP]".to_string(), sep_id),
        ("[CLS]".to_string(), cls_id),
    ));
    tokenizer
}

pub fn bench_bert(c: &mut Criterion) {
    let wp = WordPiece::from_file("data/bert-base-uncased-vocab.txt")
        .build()
        .unwrap();
    let tokenizer = create_bert_tokenizer(wp);
    let mut lines: Vec<EncodeInput> = vec![];
    let mut batches: Vec<Vec<EncodeInput>> = vec![vec![]];
    for line in BufReader::new(File::open(Path::new("data/big.txt")).unwrap()).lines() {
        let line: EncodeInput = line.unwrap().into();
        lines.push(line.clone());
        if batches.last().unwrap().len() >= BATCH_SIZE {
            batches.push(vec![]);
        }
        batches.last_mut().unwrap().push(line);
    }

    c.bench_function("WordPiece BERT encode", |b| {
        b.iter_custom(|iters| iter_bench_encode(iters, &tokenizer, &lines))
    });

    c.bench_function("WordPiece BERT encode batch", |b| {
        b.iter_custom(|iters| iter_bench_encode_batch(iters, &tokenizer, &batches))
    });
}

fn bench_train(c: &mut Criterion) {
    let mut trainer = WordPieceTrainerBuilder::default()
        .show_progress(false)
        .build();
    type Tok = TokenizerImpl<
        WordPiece,
        NormalizerWrapper,
        Whitespace,
        PostProcessorWrapper,
        DecoderWrapper,
    >;
    let mut tokenizer = Tok::new(WordPiece::default());
    tokenizer.with_pre_tokenizer(Whitespace {});
    c.bench_function("WordPiece Train vocabulary (small)", |b| {
        b.iter_custom(|iters| {
            iter_bench_train(
                iters,
                &mut tokenizer,
                &mut trainer,
                vec!["data/small.txt".to_string()],
            )
        })
    });

    let mut tokenizer = Tok::new(WordPiece::default());
    tokenizer.with_pre_tokenizer(Whitespace {});
    c.bench_function("WordPiece Train vocabulary (big)", |b| {
        b.iter_custom(|iters| {
            iter_bench_train(
                iters,
                &mut tokenizer,
                &mut trainer,
                vec!["data/big.txt".to_string()],
            )
        })
    });
}

criterion_group! {
    name = bert_benches;
    config = Criterion::default().sample_size(20);
    targets = bench_bert
}

criterion_group! {
    name = benches_train;
    config = Criterion::default().sample_size(10);
    targets = bench_train
}

criterion_main!(bert_benches, benches_train);
0
hf_public_repos/tokenizers/tokenizers/benches
hf_public_repos/tokenizers/tokenizers/benches/common/mod.rs
use std::time::{Duration, Instant};

use criterion::black_box;
use tokenizers::{
    Decoder, EncodeInput, Model, Normalizer, PostProcessor, PreTokenizer, TokenizerImpl, Trainer,
};

pub fn iter_bench_encode<M, N, PT, PP, D>(
    iters: u64,
    tokenizer: &TokenizerImpl<M, N, PT, PP, D>,
    lines: &[EncodeInput],
) -> Duration
where
    M: Model,
    N: Normalizer,
    PT: PreTokenizer,
    PP: PostProcessor,
    D: Decoder,
{
    let mut duration = Duration::new(0, 0);
    let mut line_index: usize = 0;
    for _i in 0..iters {
        if line_index >= lines.len() {
            line_index = 0;
        }
        let input = lines[line_index].clone();
        let start = Instant::now();
        let _ = black_box(tokenizer.encode(input, false));
        duration = duration.checked_add(start.elapsed()).unwrap();
    }
    duration
}

pub fn iter_bench_encode_batch<M, N, PT, PP, D>(
    iters: u64,
    tokenizer: &TokenizerImpl<M, N, PT, PP, D>,
    batches: &[Vec<EncodeInput>],
) -> Duration
where
    M: Model + Send + Sync,
    N: Normalizer + Send + Sync,
    PT: PreTokenizer + Send + Sync,
    PP: PostProcessor + Send + Sync,
    D: Decoder + Send + Sync,
{
    let mut duration = Duration::new(0, 0);
    let mut batch_index: usize = 0;
    for _i in 0..iters {
        if batch_index >= batches.len() {
            batch_index = 0;
        }
        let batch = batches[batch_index].clone();
        let start = Instant::now();
        let _ = black_box(tokenizer.encode_batch(batch, false));
        duration = duration.checked_add(start.elapsed()).unwrap();
    }
    duration
}

pub fn iter_bench_train<T, M, N, PT, PP, D>(
    iters: u64,
    tokenizer: &mut TokenizerImpl<M, N, PT, PP, D>,
    trainer: &mut T,
    files: Vec<String>,
) -> Duration
where
    T: Trainer<Model = M> + Sync,
    M: Model + Send + Sync,
    N: Normalizer + Send + Sync,
    PT: PreTokenizer + Send + Sync,
    PP: PostProcessor + Send + Sync,
    D: Decoder + Send + Sync,
{
    let mut duration = Duration::new(0, 0);
    for _i in 0..iters {
        let start = Instant::now();
        tokenizer.train_from_files(trainer, files.clone()).unwrap();
        duration = duration.checked_add(start.elapsed()).unwrap();
    }
    duration
}
0
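These shared helpers are built around `Criterion::iter_custom`: each one runs the tokenizer itself, times only the calls it cares about, and returns the accumulated `Duration` for the requested number of iterations. A condensed sketch of how a benchmark file wires one of them up; the tokenizer file and the input line below are placeholders rather than the real benchmark data:

```rust
#[macro_use]
extern crate criterion;

mod common;

use std::ops::Deref;

use criterion::Criterion;
use tokenizers::{EncodeInput, Tokenizer};

use common::iter_bench_encode;

fn bench_encode_example(c: &mut Criterion) {
    // Placeholder tokenizer file and input; the real benchmarks read data/big.txt.
    let tokenizer = Tokenizer::from_file("data/albert-base-v1-tokenizer.json").unwrap();
    let lines: Vec<EncodeInput> = vec!["A single placeholder sentence.".into()];

    c.bench_function("encode (example)", |b| {
        // `iter_custom` hands us the iteration count and expects the measured time back.
        b.iter_custom(|iters| iter_bench_encode(iters, tokenizer.deref(), &lines))
    });
}

criterion_group!(example_benches, bench_encode_example);
criterion_main!(example_benches);
```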
hf_public_repos
hf_public_repos/diffusers/_typos.toml
# Files for typos
# Instruction: https://github.com/marketplace/actions/typos-action#getting-started

[default.extend-identifiers]

[default.extend-words]
NIN="NIN" # NIN is used in scripts/convert_ncsnpp_original_checkpoint_to_diffusers.py
nd="np" # nd may be np (numpy)
parms="parms" # parms is used in scripts/convert_original_stable_diffusion_to_diffusers.py

[files]
extend-exclude = ["_typos.toml"]
0
hf_public_repos
hf_public_repos/diffusers/README.md
<p align="center">
    <br>
    <img src="https://github.com/huggingface/diffusers/blob/main/docs/source/en/imgs/diffusers_library.jpg" width="400"/>
    <br>
<p>
<p align="center">
    <a href="https://github.com/huggingface/diffusers/blob/main/LICENSE">
        <img alt="GitHub" src="https://img.shields.io/github/license/huggingface/datasets.svg?color=blue">
    </a>
    <a href="https://github.com/huggingface/diffusers/releases">
        <img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/diffusers.svg">
    </a>
    <a href="CODE_OF_CONDUCT.md">
        <img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-2.0-4baaaa.svg">
    </a>
</p>

🤗 Diffusers is the go-to library for state-of-the-art pretrained diffusion models for generating images, audio, and even 3D structures of molecules. Whether you're looking for a simple inference solution or training your own diffusion models, 🤗 Diffusers is a modular toolbox that supports both. Our library is designed with a focus on [usability over performance](https://huggingface.co/docs/diffusers/conceptual/philosophy#usability-over-performance), [simple over easy](https://huggingface.co/docs/diffusers/conceptual/philosophy#simple-over-easy), and [customizability over abstractions](https://huggingface.co/docs/diffusers/conceptual/philosophy#tweakable-contributorfriendly-over-abstraction).

🤗 Diffusers offers three core components:

- State-of-the-art [diffusion pipelines](https://huggingface.co/docs/diffusers/api/pipelines/overview) that can be run in inference with just a few lines of code.
- Interchangeable noise [schedulers](https://huggingface.co/docs/diffusers/api/schedulers/overview) for different diffusion speeds and output quality.
- Pretrained [models](https://huggingface.co/docs/diffusers/api/models) that can be used as building blocks, and combined with schedulers, for creating your own end-to-end diffusion systems.

## Installation

We recommend installing 🤗 Diffusers in a virtual environment from PyPI or Conda. For more details about installing [PyTorch](https://pytorch.org/get-started/locally/) and [Flax](https://flax.readthedocs.io/en/latest/#installation), please refer to their official documentation.

### PyTorch

With `pip` (official package):

```bash
pip install --upgrade diffusers[torch]
```

With `conda` (maintained by the community):

```sh
conda install -c conda-forge diffusers
```

### Flax

With `pip` (official package):

```bash
pip install --upgrade diffusers[flax]
```

### Apple Silicon (M1/M2) support

Please refer to the [How to use Stable Diffusion in Apple Silicon](https://huggingface.co/docs/diffusers/optimization/mps) guide.

## Quickstart

Generating outputs is super easy with 🤗 Diffusers.
To generate an image from text, use the `from_pretrained` method to load any pretrained diffusion model (browse the [Hub](https://huggingface.co/models?library=diffusers&sort=downloads) for 4000+ checkpoints): ```python from diffusers import DiffusionPipeline import torch pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16) pipeline.to("cuda") pipeline("An image of a squirrel in Picasso style").images[0] ``` You can also dig into the models and schedulers toolbox to build your own diffusion system: ```python from diffusers import DDPMScheduler, UNet2DModel from PIL import Image import torch import numpy as np scheduler = DDPMScheduler.from_pretrained("google/ddpm-cat-256") model = UNet2DModel.from_pretrained("google/ddpm-cat-256").to("cuda") scheduler.set_timesteps(50) sample_size = model.config.sample_size noise = torch.randn((1, 3, sample_size, sample_size)).to("cuda") input = noise for t in scheduler.timesteps: with torch.no_grad(): noisy_residual = model(input, t).sample prev_noisy_sample = scheduler.step(noisy_residual, t, input).prev_sample input = prev_noisy_sample image = (input / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).numpy()[0] image = Image.fromarray((image * 255).round().astype("uint8")) image ``` Check out the [Quickstart](https://huggingface.co/docs/diffusers/quicktour) to launch your diffusion journey today! ## How to navigate the documentation | **Documentation** | **What can I learn?** | |---------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | [Tutorial](https://huggingface.co/docs/diffusers/tutorials/tutorial_overview) | A basic crash course for learning how to use the library's most important features like using models and schedulers to build your own diffusion system, and training your own diffusion model. | | [Loading](https://huggingface.co/docs/diffusers/using-diffusers/loading_overview) | Guides for how to load and configure all the components (pipelines, models, and schedulers) of the library, as well as how to use different schedulers. | | [Pipelines for inference](https://huggingface.co/docs/diffusers/using-diffusers/pipeline_overview) | Guides for how to use pipelines for different inference tasks, batched generation, controlling generated outputs and randomness, and how to contribute a pipeline to the library. | | [Optimization](https://huggingface.co/docs/diffusers/optimization/opt_overview) | Guides for how to optimize your diffusion model to run faster and consume less memory. | | [Training](https://huggingface.co/docs/diffusers/training/overview) | Guides for how to train a diffusion model for different tasks with different training techniques. | ## Contribution We ❤️ contributions from the open-source community! If you want to contribute to this library, please check out our [Contribution guide](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md). You can look out for [issues](https://github.com/huggingface/diffusers/issues) you'd like to tackle to contribute to the library. 
- See [Good first issues](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) for general opportunities to contribute - See [New model/pipeline](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22New+pipeline%2Fmodel%22) to contribute exciting new diffusion models / diffusion pipelines - See [New scheduler](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22New+scheduler%22) Also, say 👋 in our public Discord channel <a href="https://discord.gg/G7tWnz98XR"><img alt="Join us on Discord" src="https://img.shields.io/discord/823813159592001537?color=5865F2&logo=discord&logoColor=white"></a>. We discuss the hottest trends about diffusion models, help each other with contributions, personal projects or just hang out ☕. ## Popular Tasks & Pipelines <table> <tr> <th>Task</th> <th>Pipeline</th> <th>🤗 Hub</th> </tr> <tr style="border-top: 2px solid black"> <td>Unconditional Image Generation</td> <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/ddpm"> DDPM </a></td> <td><a href="https://huggingface.co/google/ddpm-ema-church-256"> google/ddpm-ema-church-256 </a></td> </tr> <tr style="border-top: 2px solid black"> <td>Text-to-Image</td> <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/text2img">Stable Diffusion Text-to-Image</a></td> <td><a href="https://huggingface.co/runwayml/stable-diffusion-v1-5"> runwayml/stable-diffusion-v1-5 </a></td> </tr> <tr> <td>Text-to-Image</td> <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/unclip">unclip</a></td> <td><a href="https://huggingface.co/kakaobrain/karlo-v1-alpha"> kakaobrain/karlo-v1-alpha </a></td> </tr> <tr> <td>Text-to-Image</td> <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/if">DeepFloyd IF</a></td> <td><a href="https://huggingface.co/DeepFloyd/IF-I-XL-v1.0"> DeepFloyd/IF-I-XL-v1.0 </a></td> </tr> <tr> <td>Text-to-Image</td> <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/kandinsky">Kandinsky</a></td> <td><a href="https://huggingface.co/kandinsky-community/kandinsky-2-2-decoder"> kandinsky-community/kandinsky-2-2-decoder </a></td> </tr> <tr style="border-top: 2px solid black"> <td>Text-guided Image-to-Image</td> <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/controlnet">Controlnet</a></td> <td><a href="https://huggingface.co/lllyasviel/sd-controlnet-canny"> lllyasviel/sd-controlnet-canny </a></td> </tr> <tr> <td>Text-guided Image-to-Image</td> <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/pix2pix">Instruct Pix2Pix</a></td> <td><a href="https://huggingface.co/timbrooks/instruct-pix2pix"> timbrooks/instruct-pix2pix </a></td> </tr> <tr> <td>Text-guided Image-to-Image</td> <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/img2img">Stable Diffusion Image-to-Image</a></td> <td><a href="https://huggingface.co/runwayml/stable-diffusion-v1-5"> runwayml/stable-diffusion-v1-5 </a></td> </tr> <tr style="border-top: 2px solid black"> <td>Text-guided Image Inpainting</td> <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/inpaint">Stable Diffusion Inpaint</a></td> <td><a href="https://huggingface.co/runwayml/stable-diffusion-inpainting"> runwayml/stable-diffusion-inpainting </a></td> </tr> <tr style="border-top: 2px solid black"> <td>Image Variation</td> <td><a 
href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/image_variation">Stable Diffusion Image Variation</a></td> <td><a href="https://huggingface.co/lambdalabs/sd-image-variations-diffusers"> lambdalabs/sd-image-variations-diffusers </a></td> </tr> <tr style="border-top: 2px solid black"> <td>Super Resolution</td> <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/upscale">Stable Diffusion Upscale</a></td> <td><a href="https://huggingface.co/stabilityai/stable-diffusion-x4-upscaler"> stabilityai/stable-diffusion-x4-upscaler </a></td> </tr> <tr> <td>Super Resolution</td> <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/latent_upscale">Stable Diffusion Latent Upscale</a></td> <td><a href="https://huggingface.co/stabilityai/sd-x2-latent-upscaler"> stabilityai/sd-x2-latent-upscaler </a></td> </tr> </table> ## Popular libraries using 🧨 Diffusers - https://github.com/microsoft/TaskMatrix - https://github.com/invoke-ai/InvokeAI - https://github.com/apple/ml-stable-diffusion - https://github.com/Sanster/lama-cleaner - https://github.com/IDEA-Research/Grounded-Segment-Anything - https://github.com/ashawkey/stable-dreamfusion - https://github.com/deep-floyd/IF - https://github.com/bentoml/BentoML - https://github.com/bmaltais/kohya_ss - +3000 other amazing GitHub repositories 💪 Thank you for using us ❤️ ## Credits This library concretizes previous work by many different authors and would not have been possible without their great research and implementations. We'd like to thank, in particular, the following implementations which have helped us in our development and without which the API could not have been as polished today: - @CompVis' latent diffusion models library, available [here](https://github.com/CompVis/latent-diffusion) - @hojonathanho original DDPM implementation, available [here](https://github.com/hojonathanho/diffusion) as well as the extremely useful translation into PyTorch by @pesser, available [here](https://github.com/pesser/pytorch_diffusion) - @ermongroup's DDIM implementation, available [here](https://github.com/ermongroup/ddim) - @yang-song's Score-VE and Score-VP implementations, available [here](https://github.com/yang-song/score_sde_pytorch) We also want to thank @heejkoo for the very helpful overview of papers, code and resources on diffusion models, available [here](https://github.com/heejkoo/Awesome-Diffusion-Models) as well as @crowsonkb and @rromb for useful discussions and insights. ## Citation ```bibtex @misc{von-platen-etal-2022-diffusers, author = {Patrick von Platen and Suraj Patil and Anton Lozhkov and Pedro Cuenca and Nathan Lambert and Kashif Rasul and Mishig Davaadorj and Thomas Wolf}, title = {Diffusers: State-of-the-art diffusion models}, year = {2022}, publisher = {GitHub}, journal = {GitHub repository}, howpublished = {\url{https://github.com/huggingface/diffusers}} } ```
0
hf_public_repos
hf_public_repos/diffusers/PHILOSOPHY.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Philosophy

🧨 Diffusers provides **state-of-the-art** pretrained diffusion models across multiple modalities. Its purpose is to serve as a **modular toolbox** for both inference and training.

We aim to build a library that stands the test of time and therefore take API design very seriously.

In a nutshell, Diffusers is built to be a natural extension of PyTorch. Therefore, most of our design choices are based on [PyTorch's Design Principles](https://pytorch.org/docs/stable/community/design.html#pytorch-design-philosophy). Let's go over the most important ones:

## Usability over Performance

- While Diffusers has many built-in performance-enhancing features (see [Memory and Speed](https://huggingface.co/docs/diffusers/optimization/fp16)), models are always loaded with the highest precision and lowest optimization. Therefore, by default, diffusion pipelines are always instantiated on CPU with float32 precision if not otherwise defined by the user. This ensures usability across different platforms and accelerators and means that no complex installations are required to run the library.
- Diffusers aims to be a **lightweight** package and therefore has very few required dependencies, but many soft dependencies that can improve performance (such as `accelerate`, `safetensors`, `onnx`, etc.). We strive to keep the library as lightweight as possible so that it can be added without much concern as a dependency on other packages.
- Diffusers prefers simple, self-explanatory code over condensed, magic code. This means that shorthand code syntaxes such as lambda functions and advanced PyTorch operators are often not desired.

## Simple over easy

As PyTorch states, **explicit is better than implicit** and **simple is better than complex**. This design philosophy is reflected in multiple parts of the library:
- We follow PyTorch's API with methods like [`DiffusionPipeline.to`](https://huggingface.co/docs/diffusers/main/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.to) to let the user handle device management.
- Raising concise error messages is preferred to silently correcting erroneous input. Diffusers aims at teaching the user, rather than making the library as easy to use as possible.
- Complex model vs. scheduler logic is exposed instead of magically handled inside. Schedulers/Samplers are separated from diffusion models with minimal dependencies on each other. This forces the user to write the unrolled denoising loop. However, the separation allows for easier debugging and gives the user more control over adapting the denoising process or switching out diffusion models or schedulers.
- Separately trained components of the diffusion pipeline, *e.g.* the text encoder, the UNet, and the variational autoencoder, each have their own model class. This forces the user to handle the interaction between the different model components, and the serialization format separates the model components into different files.
However, this allows for easier debugging and customization. DreamBooth or Textual Inversion training is very simple thanks to Diffusers' ability to separate single components of the diffusion pipeline.

## Tweakable, contributor-friendly over abstraction

For large parts of the library, Diffusers adopts an important design principle of the [Transformers library](https://github.com/huggingface/transformers), which is to prefer copy-pasted code over hasty abstractions. This design principle is very opinionated and stands in stark contrast to popular design principles such as [Don't repeat yourself (DRY)](https://en.wikipedia.org/wiki/Don%27t_repeat_yourself).
In short, just like Transformers does for modeling files, Diffusers prefers to keep an extremely low level of abstraction and very self-contained code for pipelines and schedulers.
Functions, long code blocks, and even classes can be copied across multiple files, which at first can look like a bad, sloppy design choice that makes the library unmaintainable.
**However**, this design has proven to be extremely successful for Transformers and makes a lot of sense for community-driven, open-source machine learning libraries because:
- Machine learning is an extremely fast-moving field in which paradigms, model architectures, and algorithms are changing rapidly, which makes it very difficult to define long-lasting code abstractions.
- Machine learning practitioners like to be able to quickly tweak existing code for ideation and research and therefore prefer self-contained code over code that contains many abstractions.
- Open-source libraries rely on community contributions and therefore must build a library that is easy to contribute to. The more abstract the code, the more dependencies, the harder it is to read, and the harder it is to contribute to. Contributors simply stop contributing to very abstract libraries out of fear of breaking vital functionality. If contributing to a library cannot break other fundamental code, not only is it more inviting for potential new contributors, but it is also easier to review and contribute to multiple parts in parallel.

At Hugging Face, we call this design the **single-file policy**, which means that almost all of the code of a certain class should be written in a single, self-contained file. To read more about the philosophy, you can have a look at [this blog post](https://huggingface.co/blog/transformers-design-philosophy).

In Diffusers, we follow this philosophy for both pipelines and schedulers, but only partly for diffusion models. The reason we don't follow this design fully for diffusion models is that almost all diffusion pipelines, such as [DDPM](https://huggingface.co/docs/diffusers/v0.12.0/en/api/pipelines/ddpm), [Stable Diffusion](https://huggingface.co/docs/diffusers/v0.12.0/en/api/pipelines/stable_diffusion/overview#stable-diffusion-pipelines), [UnCLIP (Dalle-2)](https://huggingface.co/docs/diffusers/v0.12.0/en/api/pipelines/unclip#overview) and [Imagen](https://imagen.research.google/), rely on the same diffusion model, the [UNet](https://huggingface.co/docs/diffusers/api/models#diffusers.UNet2DConditionModel).

Great, now you should have generally understood why 🧨 Diffusers is designed the way it is 🤗.
We try to apply these design principles consistently across the library. Nevertheless, there are some minor exceptions to the philosophy or some unlucky design choices.
If you have feedback regarding the design, we would ❤️ to hear it [directly on GitHub](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feedback.md&title=).

## Design Philosophy in Details

Now, let's look a bit into the nitty-gritty details of the design philosophy. Diffusers essentially consists of three major classes: [pipelines](https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines), [models](https://github.com/huggingface/diffusers/tree/main/src/diffusers/models), and [schedulers](https://github.com/huggingface/diffusers/tree/main/src/diffusers/schedulers).
Let's walk through the more detailed design decisions for each class.

### Pipelines

Pipelines are designed to be easy to use (and therefore do not follow [*Simple over easy*](#simple-over-easy) 100%), are not feature-complete, and should loosely be seen as examples of how to use [models](#models) and [schedulers](#schedulers) for inference.

The following design principles are followed:
- Pipelines follow the single-file policy. All pipelines can be found in individual directories under src/diffusers/pipelines. One pipeline folder corresponds to one diffusion paper/project/release. Multiple pipeline files can be gathered in one pipeline folder, as it's done for [`src/diffusers/pipelines/stable-diffusion`](https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines/stable_diffusion). If pipelines share similar functionality, one can make use of the [#Copied from mechanism](https://github.com/huggingface/diffusers/blob/125d783076e5bd9785beb05367a2d2566843a271/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py#L251).
- Pipelines all inherit from [`DiffusionPipeline`].
- Every pipeline consists of different model and scheduler components that are documented in the [`model_index.json` file](https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/model_index.json), are accessible under the same name as attributes of the pipeline, and can be shared between pipelines with the [`DiffusionPipeline.components`](https://huggingface.co/docs/diffusers/main/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.components) function (see the short sketch at the end of this section).
- Every pipeline should be loadable via the [`DiffusionPipeline.from_pretrained`](https://huggingface.co/docs/diffusers/main/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained) function.
- Pipelines should be used **only** for inference.
- Pipelines should be very readable, self-explanatory, and easy to tweak.
- Pipelines should be designed to build on top of each other and be easy to integrate into higher-level APIs.
- Pipelines are **not** intended to be feature-complete user interfaces. For feature-complete user interfaces, one should rather have a look at [InvokeAI](https://github.com/invoke-ai/InvokeAI), [Diffuzers](https://github.com/abhishekkrthakur/diffuzers), and [lama-cleaner](https://github.com/Sanster/lama-cleaner).
- Every pipeline should have one and only one way to run it via a `__call__` method. The naming of the `__call__` arguments should be shared across all pipelines.
- Pipelines should be named after the task they are intended to solve.
- In almost all cases, novel diffusion pipelines shall be implemented in a new pipeline folder/file.

### Models

Models are designed as configurable toolboxes that are natural extensions of [PyTorch's Module class](https://pytorch.org/docs/stable/generated/torch.nn.Module.html). They only partly follow the **single-file policy**.
The following design principles are followed:
- Models correspond to **a type of model architecture**. *E.g.* the [`UNet2DConditionModel`] class is used for all UNet variations that expect 2D image inputs and are conditioned on some context.
- All models can be found in [`src/diffusers/models`](https://github.com/huggingface/diffusers/tree/main/src/diffusers/models), and every model architecture shall be defined in its own file, e.g. [`unet_2d_condition.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unet_2d_condition.py), [`transformer_2d.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/transformer_2d.py), etc.
- Models **do not** follow the single-file policy and should make use of smaller model building blocks, such as [`attention.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention.py), [`resnet.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/resnet.py), [`embeddings.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/embeddings.py), etc. **Note**: This is in stark contrast to Transformers' modeling files and shows that models do not really follow the single-file policy.
- Models intend to expose complexity, just like PyTorch's `Module` class does, and give clear error messages.
- Models all inherit from `ModelMixin` and `ConfigMixin`.
- Models can be optimized for performance when doing so doesn't demand major code changes, keeps backward compatibility, and gives a significant memory or compute gain.
- Models should by default have the highest precision and lowest performance setting.
- To integrate new model checkpoints whose general architecture can be classified as an architecture that already exists in Diffusers, the existing model architecture shall be adapted to make it work with the new checkpoint. One should only create a new file if the model architecture is fundamentally different.
- Models should be designed to be easily extendable to future changes. This can be achieved by limiting public function arguments and configuration arguments, and by "foreseeing" future changes; *e.g.* it is usually better to add `string` "...type" arguments that can easily be extended to new future types instead of boolean `is_..._type` arguments. Only the minimum amount of changes shall be made to existing architectures to make a new model checkpoint work.
- The model design is a difficult trade-off between keeping code readable and concise and supporting many model checkpoints. For most parts of the modeling code, classes shall be adapted for new model checkpoints, while there are some exceptions where it is preferred to add new classes to make sure the code is kept concise and readable long-term, such as [UNet blocks](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unet_2d_blocks.py) and [Attention processors](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).

### Schedulers

Schedulers are responsible for guiding the denoising process for inference as well as for defining a noise schedule for training. They are designed as individual classes with loadable configuration files and strongly follow the **single-file policy**.

The following design principles are followed:
- All schedulers are found in [`src/diffusers/schedulers`](https://github.com/huggingface/diffusers/tree/main/src/diffusers/schedulers).
- Schedulers are **not** allowed to import from large utils files and shall be kept very self-contained.
- One scheduler Python file corresponds to one scheduler algorithm (as might be defined in a paper).
- If schedulers share similar functionalities, we can make use of the `#Copied from` mechanism.
- Schedulers all inherit from `SchedulerMixin` and `ConfigMixin`.
- Schedulers can be easily swapped out with the [`ConfigMixin.from_config`](https://huggingface.co/docs/diffusers/main/en/api/configuration#diffusers.ConfigMixin.from_config) method, as explained in detail [here](./using-diffusers/schedulers.md) and sketched in the example below.
- Every scheduler has to have a `set_num_inference_steps` and a `step` function. `set_num_inference_steps(...)` has to be called before every denoising process, *i.e.* before `step(...)` is called.
- Every scheduler exposes the timesteps to be "looped over" via a `timesteps` attribute, which is an array of the timesteps the model will be called on.
- The `step(...)` function takes a predicted model output and the "current" sample (x_t) and returns the "previous", slightly more denoised sample (x_t-1).
- Given the complexity of diffusion schedulers, the `step` function does not expose all of the complexity and can be a bit of a "black box".
- In almost all cases, novel schedulers shall be implemented in a new scheduling file.
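To make the pipeline, model, and scheduler principles above a bit more concrete, here is a minimal sketch (not taken from the official docs) of how components are reused across pipelines, how a model is loaded on its own, and how a scheduler is swapped via its configuration. The checkpoint name and the `DPMSolverMultistepScheduler` replacement are assumptions chosen purely for illustration; any compatible scheduler works the same way.

```python
from diffusers import (
    DiffusionPipeline,
    DPMSolverMultistepScheduler,
    StableDiffusionImg2ImgPipeline,
    UNet2DConditionModel,
)

# Load a text-to-image pipeline; its components are listed in `model_index.json`.
text2img = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")

# Reuse the already-loaded components (text encoder, UNet, VAE, ...) to build an
# image-to-image pipeline without duplicating any weights in memory.
img2img = StableDiffusionImg2ImgPipeline(**text2img.components)

# Models can also be loaded on their own; `subfolder` points at the component
# inside the pipeline repository.
unet = UNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="unet")

# Swap the scheduler: the replacement is instantiated from the current
# scheduler's configuration, so no other part of the pipeline changes.
text2img.scheduler = DPMSolverMultistepScheduler.from_config(text2img.scheduler.config)
```

Because the new scheduler is built from the old scheduler's configuration, the swap is a one-line change that leaves the rest of the pipeline untouched, which is exactly the kind of explicit, user-controlled behavior the principles above aim for.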
0
hf_public_repos
hf_public_repos/diffusers/LICENSE
Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
0
hf_public_repos
hf_public_repos/diffusers/setup.py
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Simple check list from AllenNLP repo: https://github.com/allenai/allennlp/blob/main/setup.py To create the package for pypi. 1. Run `make pre-release` (or `make pre-patch` for a patch release) then run `make fix-copies` to fix the index of the documentation. If releasing on a special branch, copy the updated README.md on the main branch for your the commit you will make for the post-release and run `make fix-copies` on the main branch as well. 2. Run Tests for Amazon Sagemaker. The documentation is located in `./tests/sagemaker/README.md`, otherwise @philschmid. 3. Unpin specific versions from setup.py that use a git install. 4. Checkout the release branch (v<RELEASE>-release, for example v4.19-release), and commit these changes with the message: "Release: <RELEASE>" and push. 5. Wait for the tests on main to be completed and be green (otherwise revert and fix bugs) 6. Add a tag in git to mark the release: "git tag v<RELEASE> -m 'Adds tag v<RELEASE> for pypi' " Push the tag to git: git push --tags origin v<RELEASE>-release 7. Build both the sources and the wheel. Do not change anything in setup.py between creating the wheel and the source distribution (obviously). For the wheel, run: "python setup.py bdist_wheel" in the top level directory. (this will build a wheel for the python version you use to build it). For the sources, run: "python setup.py sdist" You should now have a /dist directory with both .whl and .tar.gz source versions. 8. Check that everything looks correct by uploading the package to the pypi test server: twine upload dist/* -r pypitest (pypi suggest using twine as other methods upload files via plaintext.) You may have to specify the repository url, use the following command then: twine upload dist/* -r pypitest --repository-url=https://test.pypi.org/legacy/ Check that you can install it in a virtualenv by running: pip install -i https://testpypi.python.org/pypi diffusers Check you can run the following commands: python -c "from diffusers import pipeline; classifier = pipeline('text-classification'); print(classifier('What a nice release'))" python -c "from diffusers import *" 9. Upload the final version to actual pypi: twine upload dist/* -r pypi 10. Copy the release notes from RELEASE.md to the tag in github once everything is looking hunky-dory. 11. Run `make post-release` (or, for a patch release, `make post-patch`). If you were on a branch for the release, you need to go back to main before executing this. """ import os import re from distutils.core import Command from setuptools import find_packages, setup # IMPORTANT: # 1. all dependencies should be listed here with their version requirements if any # 2. 
once modified, run: `make deps_table_update` to update src/diffusers/dependency_versions_table.py _deps = [ "Pillow", # keep the PIL.Image.Resampling deprecation away "accelerate>=0.11.0", "compel==0.1.8", "black~=23.1", "datasets", "filelock", "flax>=0.4.1", "hf-doc-builder>=0.3.0", "huggingface-hub>=0.13.2", "requests-mock==1.10.0", "importlib_metadata", "invisible-watermark>=0.2.0", "isort>=5.5.4", "jax>=0.2.8,!=0.3.2", "jaxlib>=0.1.65", "Jinja2", "k-diffusion>=0.0.12", "torchsde", "note_seq", "librosa", "numpy", "omegaconf", "parameterized", "protobuf>=3.20.3,<4", "pytest", "pytest-timeout", "pytest-xdist", "ruff>=0.0.241", "safetensors>=0.3.1", "sentencepiece>=0.1.91,!=0.1.92", "scipy", "onnx", "regex!=2019.12.17", "requests", "tensorboard", "torch>=1.4", "torchvision", "transformers>=4.25.1", "urllib3<=2.0.0", ] # this is a lookup table with items like: # # tokenizers: "huggingface-hub==0.8.0" # packaging: "packaging" # # some of the values are versioned whereas others aren't. deps = {b: a for a, b in (re.findall(r"^(([^!=<>~]+)(?:[!=<>~].*)?$)", x)[0] for x in _deps)} # since we save this data in src/diffusers/dependency_versions_table.py it can be easily accessed from # anywhere. If you need to quickly access the data from this table in a shell, you can do so easily with: # # python -c 'import sys; from diffusers.dependency_versions_table import deps; \ # print(" ".join([ deps[x] for x in sys.argv[1:]]))' tokenizers datasets # # Just pass the desired package names to that script as it's shown with 2 packages above. # # If diffusers is not yet installed and the work is done from the cloned repo remember to add `PYTHONPATH=src` to the script above # # You can then feed this for example to `pip`: # # pip install -U $(python -c 'import sys; from diffusers.dependency_versions_table import deps; \ # print(" ".join([ deps[x] for x in sys.argv[1:]]))' tokenizers datasets) # def deps_list(*pkgs): return [deps[pkg] for pkg in pkgs] class DepsTableUpdateCommand(Command): """ A custom distutils command that updates the dependency table. usage: python setup.py deps_table_update """ description = "build runtime dependency table" user_options = [ # format: (long option, short option, description). ("dep-table-update", None, "updates src/diffusers/dependency_versions_table.py"), ] def initialize_options(self): pass def finalize_options(self): pass def run(self): entries = "\n".join([f' "{k}": "{v}",' for k, v in deps.items()]) content = [ "# THIS FILE HAS BEEN AUTOGENERATED. To update:", "# 1. modify the `_deps` dict in setup.py", "# 2. 
run `make deps_table_update``", "deps = {", entries, "}", "", ] target = "src/diffusers/dependency_versions_table.py" print(f"updating {target}") with open(target, "w", encoding="utf-8", newline="\n") as f: f.write("\n".join(content)) extras = {} extras = {} extras["quality"] = deps_list("urllib3", "black", "isort", "ruff", "hf-doc-builder") extras["docs"] = deps_list("hf-doc-builder") extras["training"] = deps_list("accelerate", "datasets", "protobuf", "tensorboard", "Jinja2") extras["test"] = deps_list( "compel", "datasets", "Jinja2", "invisible-watermark", "k-diffusion", "librosa", "omegaconf", "parameterized", "pytest", "pytest-timeout", "pytest-xdist", "requests-mock", "safetensors", "sentencepiece", "scipy", "torchvision", "transformers", ) extras["torch"] = deps_list("torch", "accelerate") if os.name == "nt": # windows extras["flax"] = [] # jax is not supported on windows else: extras["flax"] = deps_list("jax", "jaxlib", "flax") extras["dev"] = ( extras["quality"] + extras["test"] + extras["training"] + extras["docs"] + extras["torch"] + extras["flax"] ) install_requires = [ deps["importlib_metadata"], deps["filelock"], deps["huggingface-hub"], deps["numpy"], deps["regex"], deps["requests"], deps["safetensors"], deps["Pillow"], ] setup( name="diffusers", version="0.20.0.dev0", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots) description="Diffusers", long_description=open("README.md", "r", encoding="utf-8").read(), long_description_content_type="text/markdown", keywords="deep learning", license="Apache", author="The HuggingFace team", author_email="[email protected]", url="https://github.com/huggingface/diffusers", package_dir={"": "src"}, packages=find_packages("src"), include_package_data=True, python_requires=">=3.7.0", install_requires=list(install_requires), extras_require=extras, entry_points={"console_scripts": ["diffusers-cli=diffusers.commands.diffusers_cli:main"]}, classifiers=[ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Intended Audience :: Education", "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Scientific/Engineering :: Artificial Intelligence", ], cmdclass={"deps_table_update": DepsTableUpdateCommand}, ) # Release checklist # 1. Change the version in __init__.py and setup.py. # 2. Commit these changes with the message: "Release: Release" # 3. Add a tag in git to mark the release: "git tag RELEASE -m 'Adds tag RELEASE for pypi' " # Push the tag to git: git push --tags origin main # 4. Run the following commands in the top-level directory: # python setup.py bdist_wheel # python setup.py sdist # 5. Upload the package to the pypi test server first: # twine upload dist/* -r pypitest # twine upload dist/* -r pypitest --repository-url=https://test.pypi.org/legacy/ # 6. Check that you can install it in a virtualenv by running: # pip install -i https://testpypi.python.org/pypi diffusers # diffusers env # diffusers test # 7. Upload the final version to actual pypi: # twine upload dist/* -r pypi # 8. Add release notes to the tag in github once everything is looking hunky-dory. # 9. Update the version in __init__.py, setup.py to the new version "-dev" and push to master
0
hf_public_repos
hf_public_repos/diffusers/pyproject.toml
[tool.black] line-length = 119 target-version = ['py37'] [tool.ruff] # Never enforce `E501` (line length violations). ignore = ["C901", "E501", "E741", "W605"] select = ["C", "E", "F", "I", "W"] line-length = 119 # Ignore import violations in all `__init__.py` files. [tool.ruff.per-file-ignores] "__init__.py" = ["E402", "F401", "F403", "F811"] "src/diffusers/utils/dummy_*.py" = ["F401"] [tool.ruff.isort] lines-after-imports = 2 known-first-party = ["diffusers"]
0
hf_public_repos
hf_public_repos/diffusers/setup.cfg
[isort] default_section = FIRSTPARTY ensure_newline_before_comments = True force_grid_wrap = 0 include_trailing_comma = True known_first_party = accelerate known_third_party = numpy torch torch_xla line_length = 119 lines_after_imports = 2 multi_line_output = 3 use_parentheses = True [flake8] ignore = E203, E722, E501, E741, W503, W605 max-line-length = 119 per-file-ignores = __init__.py:F401
0
hf_public_repos
hf_public_repos/diffusers/CITATION.cff
cff-version: 1.2.0 title: 'Diffusers: State-of-the-art diffusion models' message: >- If you use this software, please cite it using the metadata from this file. type: software authors: - given-names: Patrick family-names: von Platen - given-names: Suraj family-names: Patil - given-names: Anton family-names: Lozhkov - given-names: Pedro family-names: Cuenca - given-names: Nathan family-names: Lambert - given-names: Kashif family-names: Rasul - given-names: Mishig family-names: Davaadorj - given-names: Thomas family-names: Wolf repository-code: 'https://github.com/huggingface/diffusers' abstract: >- Diffusers provides pretrained diffusion models across multiple modalities, such as vision and audio, and serves as a modular toolbox for inference and training of diffusion models. keywords: - deep-learning - pytorch - image-generation - diffusion - text2image - image2image - score-based-generative-modeling - stable-diffusion license: Apache-2.0 version: 0.12.1
0
hf_public_repos
hf_public_repos/diffusers/CODE_OF_CONDUCT.md
# Contributor Covenant Code of Conduct ## Our Pledge We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. ## Our Standards Examples of behavior that contributes to a positive environment for our community include: * Demonstrating empathy and kindness toward other people * Being respectful of differing opinions, viewpoints, and experiences * Giving and gracefully accepting constructive feedback * Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience * Focusing on what is best not just for us as individuals, but for the overall diffusers community Examples of unacceptable behavior include: * The use of sexualized language or imagery, and sexual attention or advances of any kind * Trolling, insulting or derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or email address, without their explicit permission * Spamming issues or PRs with links to projects unrelated to this library * Other conduct which could reasonably be considered inappropriate in a professional setting ## Enforcement Responsibilities Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate. ## Scope This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at [email protected]. All complaints will be reviewed and investigated promptly and fairly. All community leaders are obligated to respect the privacy and security of the reporter of any incident. ## Enforcement Guidelines Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: ### 1. Correction **Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. **Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested. ### 2. Warning **Community Impact**: A violation through a single incident or series of actions. 
**Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban. ### 3. Temporary Ban **Community Impact**: A serious violation of community standards, including sustained inappropriate behavior. **Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. ### 4. Permanent Ban **Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. **Consequence**: A permanent ban from any sort of public interaction within the community. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.0, available at https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/diversity). [homepage]: https://www.contributor-covenant.org For answers to common questions about this code of conduct, see the FAQ at https://www.contributor-covenant.org/faq. Translations are available at https://www.contributor-covenant.org/translations.
0
hf_public_repos
hf_public_repos/diffusers/MANIFEST.in
include LICENSE include src/diffusers/utils/model_card_template.md
0
hf_public_repos
hf_public_repos/diffusers/CONTRIBUTING.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # How to contribute to Diffusers 🧨 We ❤️ contributions from the open-source community! Everyone is welcome, and all types of participation –not just code– are valued and appreciated. Answering questions, helping others, reaching out, and improving the documentation are all immensely valuable to the community, so don't be afraid and get involved if you're up for it! Everyone is encouraged to start by saying 👋 in our public Discord channel. We discuss the latest trends in diffusion models, ask questions, show off personal projects, help each other with contributions, or just hang out ☕. <a href="https://Discord.gg/G7tWnz98XR"><img alt="Join us on Discord" src="https://img.shields.io/Discord/823813159592001537?color=5865F2&logo=Discord&logoColor=white"></a> Whichever way you choose to contribute, we strive to be part of an open, welcoming, and kind community. Please, read our [code of conduct](https://github.com/huggingface/diffusers/blob/main/CODE_OF_CONDUCT.md) and be mindful to respect it during your interactions. We also recommend you become familiar with the [ethical guidelines](https://huggingface.co/docs/diffusers/conceptual/ethical_guidelines) that guide our project and ask you to adhere to the same principles of transparency and responsibility. We enormously value feedback from the community, so please do not be afraid to speak up if you believe you have valuable feedback that can help improve the library - every message, comment, issue, and pull request (PR) is read and considered. ## Overview You can contribute in many ways ranging from answering questions on issues to adding new diffusion models to the core library. In the following, we give an overview of different ways to contribute, ranked by difficulty in ascending order. All of them are valuable to the community. * 1. Asking and answering questions on [the Diffusers discussion forum](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers) or on [Discord](https://discord.gg/G7tWnz98XR). * 2. Opening new issues on [the GitHub Issues tab](https://github.com/huggingface/diffusers/issues/new/choose) * 3. Answering issues on [the GitHub Issues tab](https://github.com/huggingface/diffusers/issues) * 4. Fix a simple issue, marked by the "Good first issue" label, see [here](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22). * 5. Contribute to the [documentation](https://github.com/huggingface/diffusers/tree/main/docs/source). * 6. Contribute a [Community Pipeline](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3Acommunity-examples) * 7. Contribute to the [examples](https://github.com/huggingface/diffusers/tree/main/examples). * 8. Fix a more difficult issue, marked by the "Good second issue" label, see [here](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22Good+second+issue%22). * 9. 
Add a new pipeline, model, or scheduler, see ["New Pipeline/Model"](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22New+pipeline%2Fmodel%22) and ["New scheduler"](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22New+scheduler%22) issues. For this contribution, please have a look at [Design Philosophy](https://github.com/huggingface/diffusers/blob/main/PHILOSOPHY.md).

As said before, **all contributions are valuable to the community**.
In the following, we will explain each contribution a bit more in detail.

For all contributions 4.-9. you will need to open a PR. It is explained in detail how to do so in [Opening a pull request](#how-to-open-a-pr).

### 1. Asking and answering questions on the Diffusers discussion forum or on the Diffusers Discord

Any question or comment related to the Diffusers library can be asked on the [discussion forum](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/) or on [Discord](https://discord.gg/G7tWnz98XR). Such questions and comments include (but are not limited to):
- Reports of training or inference experiments in an attempt to share knowledge
- Presentation of personal projects
- Questions about non-official training examples
- Project proposals
- General feedback
- Paper summaries
- Asking for help on personal projects that build on top of the Diffusers library
- General questions
- Ethical questions regarding diffusion models
- ...

Every question that is asked on the forum or on Discord actively encourages the community to publicly share knowledge and might very well help a beginner in the future who has the same question you're having. Please do pose any questions you might have.
In the same spirit, you are of immense help to the community by answering such questions because this way you are publicly documenting knowledge for everybody to learn from.

**Please** keep in mind that the more effort you put into asking or answering a question, the higher the quality of the publicly documented knowledge. In the same way, well-posed and well-answered questions create a high-quality knowledge database accessible to everybody, while badly posed questions or answers reduce the overall quality of the public knowledge database.
In short, a high-quality question or answer is *precise*, *concise*, *relevant*, *easy-to-understand*, *accessible*, and *well-formatted/well-posed*. For more information, please have a look through the [How to write a good issue](#how-to-write-a-good-issue) section.

**NOTE about channels**:
[*The forum*](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/63) is much better indexed by search engines, such as Google. Posts are ranked by popularity rather than chronologically. Hence, it's easier to look up questions and answers that were posted some time ago.
In addition, questions and answers posted in the forum can easily be linked to.
In contrast, *Discord* has a chat-like format that invites fast back-and-forth communication.
While it will most likely take less time for you to get an answer to your question on Discord, your question won't be visible anymore over time. Also, it's much harder to find information that was posted a while back on Discord. We therefore strongly recommend using the forum for high-quality questions and answers in an attempt to create long-lasting knowledge for the community.
If discussions on Discord lead to very interesting answers and conclusions, we recommend posting the results on the forum to make the information more available for future readers. ### 2. Opening new issues on the GitHub issues tab The 🧨 Diffusers library is robust and reliable thanks to the users who notify us of the problems they encounter. So thank you for reporting an issue. Remember, GitHub issues are reserved for technical questions directly related to the Diffusers library, bug reports, feature requests, or feedback on the library design. In a nutshell, this means that everything that is **not** related to the **code of the Diffusers library** (including the documentation) should **not** be asked on GitHub, but rather on either the [forum](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/63) or [Discord](https://discord.gg/G7tWnz98XR). **Please consider the following guidelines when opening a new issue**: - Make sure you have searched whether your issue has already been asked before (use the search bar on GitHub under Issues). - Please never report a new issue on another (related) issue. If another issue is highly related, please open a new issue nevertheless and link to the related issue. - Make sure your issue is written in English. Please use one of the great, free online translation services, such as [DeepL](https://www.deepl.com/translator) to translate from your native language to English if you are not comfortable in English. - Check whether your issue might be solved by updating to the newest Diffusers version. Before posting your issue, please make sure that `python -c "import diffusers; print(diffusers.__version__)"` is higher or matches the latest Diffusers version. - Remember that the more effort you put into opening a new issue, the higher the quality of your answer will be and the better the overall quality of the Diffusers issues. New issues usually include the following. #### 2.1. Reproducible, minimal bug reports. A bug report should always have a reproducible code snippet and be as minimal and concise as possible. This means in more detail: - Narrow the bug down as much as you can, **do not just dump your whole code file** - Format your code - Do not include any external libraries except for Diffusers depending on them. - **Always** provide all necessary information about your environment; for this, you can run: `diffusers-cli env` in your shell and copy-paste the displayed information to the issue. - Explain the issue. If the reader doesn't know what the issue is and why it is an issue, she cannot solve it. - **Always** make sure the reader can reproduce your issue with as little effort as possible. If your code snippet cannot be run because of missing libraries or undefined variables, the reader cannot help you. Make sure your reproducible code snippet is as minimal as possible and can be copy-pasted into a simple Python shell. - If in order to reproduce your issue a model and/or dataset is required, make sure the reader has access to that model or dataset. You can always upload your model or dataset to the [Hub](https://huggingface.co) to make it easily downloadable. Try to keep your model and dataset as small as possible, to make the reproduction of your issue as effortless as possible. For more information, please have a look through the [How to write a good issue](#how-to-write-a-good-issue) section. You can open a bug report [here](https://github.com/huggingface/diffusers/issues/new/choose). #### 2.2. 
Feature requests. A world-class feature request addresses the following points: 1. Motivation first: * Is it related to a problem/frustration with the library? If so, please explain why. Providing a code snippet that demonstrates the problem is best. * Is it related to something you would need for a project? We'd love to hear about it! * Is it something you worked on and think could benefit the community? Awesome! Tell us what problem it solved for you. 2. Write a *full paragraph* describing the feature; 3. Provide a **code snippet** that demonstrates its future use; 4. In case this is related to a paper, please attach a link; 5. Attach any additional information (drawings, screenshots, etc.) you think may help. You can open a feature request [here](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feature_request.md&title=). #### 2.3 Feedback. Feedback about the library design and why it is good or not good helps the core maintainers immensely to build a user-friendly library. To understand the philosophy behind the current design philosophy, please have a look [here](https://huggingface.co/docs/diffusers/conceptual/philosophy). If you feel like a certain design choice does not fit with the current design philosophy, please explain why and how it should be changed. If a certain design choice follows the design philosophy too much, hence restricting use cases, explain why and how it should be changed. If a certain design choice is very useful for you, please also leave a note as this is great feedback for future design decisions. You can open an issue about feedback [here](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feedback.md&title=). #### 2.4 Technical questions. Technical questions are mainly about why certain code of the library was written in a certain way, or what a certain part of the code does. Please make sure to link to the code in question and please provide detail on why this part of the code is difficult to understand. You can open an issue about a technical question [here](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=bug&template=bug-report.yml). #### 2.5 Proposal to add a new model, scheduler, or pipeline. If the diffusion model community released a new model, pipeline, or scheduler that you would like to see in the Diffusers library, please provide the following information: * Short description of the diffusion pipeline, model, or scheduler and link to the paper or public release. * Link to any of its open-source implementation. * Link to the model weights if they are available. If you are willing to contribute to the model yourself, let us know so we can best guide you. Also, don't forget to tag the original author of the component (model, scheduler, pipeline, etc.) by GitHub handle if you can find it. You can open a request for a model/pipeline/scheduler [here](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=New+model%2Fpipeline%2Fscheduler&template=new-model-addition.yml). ### 3. Answering issues on the GitHub issues tab Answering issues on GitHub might require some technical knowledge of Diffusers, but we encourage everybody to give it a try even if you are not 100% certain that your answer is correct. Some tips to give a high-quality answer to an issue: - Be as concise and minimal as possible - Stay on topic. An answer to the issue should concern the issue and only the issue. 
- Provide links to code, papers, or other sources that prove or encourage your point. - Answer in code. If a simple code snippet is the answer to the issue or shows how the issue can be solved, please provide a fully reproducible code snippet. Also, many issues tend to be simply off-topic, duplicates of other issues, or irrelevant. It is of great help to the maintainers if you can answer such issues, encouraging the author of the issue to be more precise, providing the link to a duplicated issue, or redirecting them to [the forum](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/63) or [Discord](https://discord.gg/G7tWnz98XR). If you have verified that a reported bug is correct and requires a correction in the source code, please have a look at the next sections. For all of the following contributions, you will need to open a PR. It is explained in detail how to do so in the [Opening a pull request](#how-to-open-a-pr) section. ### 4. Fixing a "Good first issue" *Good first issues* are marked by the [Good first issue](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) label. Usually, the issue already explains how a potential solution should look so that it is easier to fix. If the issue hasn't been closed and you would like to try to fix it, you can just leave a message saying "I would like to try this issue." There are usually three scenarios: - a.) The issue description already proposes a fix. In this case and if the solution makes sense to you, you can open a PR or draft PR to fix it. - b.) The issue description does not propose a fix. In this case, you can ask what a proposed fix could look like and someone from the Diffusers team should answer shortly. If you have a good idea of how to fix it, feel free to directly open a PR. - c.) There is already an open PR to fix the issue, but the issue hasn't been closed yet. If the PR has gone stale, you can simply open a new PR and link to the stale PR. PRs often go stale if the original contributor who wanted to fix the issue suddenly cannot find the time anymore to proceed. This often happens in open-source and is very normal. In this case, the community will be very happy if you give it a new try and leverage the knowledge of the existing PR. If there is already a PR and it is active, you can help the author by giving suggestions, reviewing the PR or even asking whether you can contribute to the PR. ### 5. Contribute to the documentation A good library **always** has good documentation! The official documentation is often one of the first points of contact for new users of the library, and therefore contributing to the documentation is a **highly valuable contribution**. Contributing to the documentation can take many forms: - Correcting spelling or grammatical errors. - Correcting incorrect formatting of docstrings. If you see that the official documentation is weirdly displayed or a link is broken, we are very happy if you take some time to correct it. - Correcting the shape or dimensions of a docstring input or output tensor. - Clarifying documentation that is hard to understand or incorrect. - Updating outdated code examples. - Translating the documentation to another language. Anything displayed on [the official Diffusers doc page](https://huggingface.co/docs/diffusers/index) is part of the official documentation and can be corrected or adjusted in the respective [documentation source](https://github.com/huggingface/diffusers/tree/main/docs/source).
Please have a look at [this page](https://github.com/huggingface/diffusers/tree/main/docs) on how to verify changes made to the documentation locally. ### 6. Contribute a community pipeline [Pipelines](https://huggingface.co/docs/diffusers/api/pipelines/overview) are usually the first point of contact between the Diffusers library and the user. Pipelines are examples of how to use Diffusers [models](https://huggingface.co/docs/diffusers/api/models) and [schedulers](https://huggingface.co/docs/diffusers/api/schedulers/overview). We support two types of pipelines: - Official Pipelines - Community Pipelines Both official and community pipelines follow the same design and consist of the same type of components. Official pipelines are tested and maintained by the core maintainers of Diffusers. Their code resides in [src/diffusers/pipelines](https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines). In contrast, community pipelines are contributed and maintained purely by the **community** and are **not** tested. They reside in [examples/community](https://github.com/huggingface/diffusers/tree/main/examples/community) and while they can be accessed via the [PyPI diffusers package](https://pypi.org/project/diffusers/), their code is not part of the PyPI distribution. The reason for the distinction is that the core maintainers of the Diffusers library cannot maintain and test all possible ways diffusion models can be used for inference, but some of them may be of interest to the community. Officially released diffusion pipelines, such as Stable Diffusion are added to the core src/diffusers/pipelines package which ensures high quality of maintenance, no backward-breaking code changes, and testing. More bleeding edge pipelines should be added as community pipelines. If usage for a community pipeline is high, the pipeline can be moved to the official pipelines upon request from the community. This is one of the ways we strive to be a community-driven library. To add a community pipeline, one should add a <name-of-the-community>.py file to [examples/community](https://github.com/huggingface/diffusers/tree/main/examples/community) and adapt the [examples/community/README.md](https://github.com/huggingface/diffusers/tree/main/examples/community/README.md) to include an example of the new pipeline. An example can be seen [here](https://github.com/huggingface/diffusers/pull/2400). Community pipeline PRs are only checked at a superficial level and ideally they should be maintained by their original authors. Contributing a community pipeline is a great way to understand how Diffusers models and schedulers work. Having contributed a community pipeline is usually the first stepping stone to contributing an official pipeline to the core package. ### 7. Contribute to training examples Diffusers examples are a collection of training scripts that reside in [examples](https://github.com/huggingface/diffusers/tree/main/examples). We support two types of training examples: - Official training examples - Research training examples Research training examples are located in [examples/research_projects](https://github.com/huggingface/diffusers/tree/main/examples/research_projects) whereas official training examples include all folders under [examples](https://github.com/huggingface/diffusers/tree/main/examples) except the `research_projects` and `community` folders. 
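Before going further into the training examples, here is a rough, hypothetical sketch of what a community pipeline file in `examples/community` (see section 6 above) could look like. The class name, the choice of components, and the sampling loop are illustrative assumptions rather than an official template; real community pipelines are considerably more elaborate:

```python
# Hypothetical community pipeline sketch; the file would live at e.g. examples/community/my_pipeline.py.
import torch

from diffusers import DiffusionPipeline


class MyUnconditionalPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        # register_modules makes the components discoverable by save_pretrained/from_pretrained
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size: int = 1, num_inference_steps: int = 50):
        # Start from Gaussian noise and iteratively denoise it with the scheduler.
        sample = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            device=self.device,
        )
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.scheduler.timesteps:
            noise_pred = self.unet(sample, t).sample
            sample = self.scheduler.step(noise_pred, t, sample).prev_sample
        return sample
```

Assuming such a file were merged into `examples/community`, users could load it roughly along the lines of `DiffusionPipeline.from_pretrained("google/ddpm-cifar10-32", custom_pipeline="my_pipeline")`.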
The official training examples are maintained by the Diffusers core maintainers, whereas the research training examples are maintained by the community. This is for the same reasons put forward in [6. Contribute a community pipeline](#contribute-a-community-pipeline) for official pipelines vs. community pipelines: it is not feasible for the core maintainers to maintain all possible training methods for diffusion models. If the Diffusers core maintainers and the community consider a certain training paradigm to be too experimental or not popular enough, the corresponding training code should be put in the `research_projects` folder and maintained by the author. Both official training and research examples consist of a directory that contains one or more training scripts, a requirements.txt file, and a README.md file. In order for the user to make use of the training examples, it is required to clone the repository: ``` git clone https://github.com/huggingface/diffusers ``` as well as to install all additional dependencies required for training: ``` pip install -r examples/<your-example-folder>/requirements.txt ``` Therefore when adding an example, the `requirements.txt` file should define all pip dependencies required for your training example so that once all those are installed, the user can run the example's training script. See, for example, the [DreamBooth `requirements.txt` file](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/requirements.txt). Training examples of the Diffusers library should adhere to the following philosophy: - All the code necessary to run the examples should be found in a single Python file (a minimal sketch of such a script's skeleton is shown below) - One should be able to run the example from the command line with `python <your-example>.py --args` - Examples should be kept simple and serve as **an example** of how to use Diffusers for training. The purpose of example scripts is **not** to create state-of-the-art diffusion models, but rather to reproduce known training schemes without adding too much custom logic. As a byproduct of this point, our examples also strive to serve as good educational materials. To contribute an example, it is highly recommended to look at already existing examples such as [dreambooth](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth.py) to get an idea of what they should look like. We strongly advise contributors to make use of the [Accelerate library](https://github.com/huggingface/accelerate) as it's tightly integrated with Diffusers. Once an example script works, please make sure to add a comprehensive `README.md` that states exactly how to use the example. This README should include: - An example command on how to run the example script as shown [here e.g.](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth#running-locally-with-pytorch). - A link to some training results (logs, models, ...) that show what the user can expect as shown [here e.g.](https://api.wandb.ai/report/patrickvonplaten/xm6cd5q5). - If you are adding a non-official/research training example, **please don't forget** to add a sentence stating that you are maintaining this training example, which includes your git handle, as shown [here](https://github.com/huggingface/diffusers/tree/main/examples/research_projects/intel_opts#diffusers-examples-with-intel-optimizations).
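As referenced in the philosophy list above, here is a minimal, hypothetical sketch of the single-file, command-line-driven shape a training example follows. The script name and placeholder body are assumptions for illustration only; the flag names mirror ones used by the existing example scripts:

```python
# train_my_method.py -- hypothetical file name; a real example contains the full training loop.
import argparse


def parse_args():
    parser = argparse.ArgumentParser(description="Minimal skeleton of a Diffusers training example.")
    # Flag names mirror those used by the existing example scripts (e.g. train_dreambooth.py).
    parser.add_argument("--pretrained_model_name_or_path", type=str, required=True)
    parser.add_argument("--output_dir", type=str, default="training-output")
    parser.add_argument("--resolution", type=int, default=512)
    parser.add_argument("--train_batch_size", type=int, default=1)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--max_train_steps", type=int, default=100)
    return parser.parse_args()


def main():
    args = parse_args()
    # A real example would load the model components with Diffusers, prepare the optimizer and
    # dataloader with Accelerate, run the training loop, and save the result to args.output_dir.
    print(f"Would fine-tune {args.pretrained_model_name_or_path} for {args.max_train_steps} steps.")


if __name__ == "__main__":
    main()
```

Such a script could then be launched like the existing examples, e.g. `python train_my_method.py --pretrained_model_name_or_path <model> --output_dir my-run`, or through `accelerate launch`.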
If you are contributing to the official training examples, please also make sure to add a test to [examples/test_examples.py](https://github.com/huggingface/diffusers/blob/main/examples/test_examples.py). This is not necessary for non-official training examples. ### 8. Fixing a "Good second issue" *Good second issues* are marked by the [Good second issue](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22Good+second+issue%22) label. Good second issues are usually more complicated to solve than [Good first issues](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22). The issue description usually gives less guidance on how to fix the issue and requires a decent understanding of the library by the interested contributor. If you are interested in tackling a second good issue, feel free to open a PR to fix it and link the PR to the issue. If you see that a PR has already been opened for this issue but did not get merged, have a look to understand why it wasn't merged and try to open an improved PR. Good second issues are usually more difficult to get merged compared to good first issues, so don't hesitate to ask for help from the core maintainers. If your PR is almost finished the core maintainers can also jump into your PR and commit to it in order to get it merged. ### 9. Adding pipelines, models, schedulers Pipelines, models, and schedulers are the most important pieces of the Diffusers library. They provide easy access to state-of-the-art diffusion technologies and thus allow the community to build powerful generative AI applications. By adding a new model, pipeline, or scheduler you might enable a new powerful use case for any of the user interfaces relying on Diffusers which can be of immense value for the whole generative AI ecosystem. Diffusers has a couple of open feature requests for all three components - feel free to gloss over them if you don't know yet what specific component you would like to add: - [Model or pipeline](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22New+pipeline%2Fmodel%22) - [Scheduler](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22New+scheduler%22) Before adding any of the three components, it is strongly recommended that you give the [Philosophy guide](https://github.com/huggingface/diffusers/blob/main/PHILOSOPHY.md) a read to better understand the design of any of the three components. Please be aware that we cannot merge model, scheduler, or pipeline additions that strongly diverge from our design philosophy as it will lead to API inconsistencies. If you fundamentally disagree with a design choice, please open a [Feedback issue](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feedback.md&title=) instead so that it can be discussed whether a certain design pattern/design choice shall be changed everywhere in the library and whether we shall update our design philosophy. Consistency across the library is very important for us. Please make sure to add links to the original codebase/paper to the PR and ideally also ping the original author directly on the PR so that they can follow the progress and potentially help with questions. If you are unsure or stuck in the PR, don't hesitate to leave a message to ask for a first review or help. ## How to write a good issue **The better your issue is written, the higher the chances that it will be quickly resolved.** 1. 
Make sure that you've used the correct template for your issue. You can pick between *Bug Report*, *Feature Request*, *Feedback about API Design*, *New model/pipeline/scheduler addition*, *Forum*, or a blank issue. Make sure to pick the correct one when opening [a new issue](https://github.com/huggingface/diffusers/issues/new/choose). 2. **Be precise**: Give your issue a fitting title. Try to formulate your issue description as simple as possible. The more precise you are when submitting an issue, the less time it takes to understand the issue and potentially solve it. Make sure to open an issue for one issue only and not for multiple issues. If you found multiple issues, simply open multiple issues. If your issue is a bug, try to be as precise as possible about what bug it is - you should not just write "Error in diffusers". 3. **Reproducibility**: No reproducible code snippet == no solution. If you encounter a bug, maintainers **have to be able to reproduce** it. Make sure that you include a code snippet that can be copy-pasted into a Python interpreter to reproduce the issue. Make sure that your code snippet works, *i.e.* that there are no missing imports or missing links to images, ... Your issue should contain an error message **and** a code snippet that can be copy-pasted without any changes to reproduce the exact same error message. If your issue is using local model weights or local data that cannot be accessed by the reader, the issue cannot be solved. If you cannot share your data or model, try to make a dummy model or dummy data. 4. **Minimalistic**: Try to help the reader as much as you can to understand the issue as quickly as possible by staying as concise as possible. Remove all code / all information that is irrelevant to the issue. If you have found a bug, try to create the easiest code example you can to demonstrate your issue, do not just dump your whole workflow into the issue as soon as you have found a bug. E.g., if you train a model and get an error at some point during the training, you should first try to understand what part of the training code is responsible for the error and try to reproduce it with a couple of lines. Try to use dummy data instead of full datasets. 5. Add links. If you are referring to a certain naming, method, or model make sure to provide a link so that the reader can better understand what you mean. If you are referring to a specific PR or issue, make sure to link it to your issue. Do not assume that the reader knows what you are talking about. The more links you add to your issue the better. 6. Formatting. Make sure to nicely format your issue by formatting code into Python code syntax, and error messages into normal code syntax. See the [official GitHub formatting docs](https://docs.github.com/en/get-started/writing-on-github/getting-started-with-writing-and-formatting-on-github/basic-writing-and-formatting-syntax) for more information. 7. Think of your issue not as a ticket to be solved, but rather as a beautiful entry to a well-written encyclopedia. Every added issue is a contribution to publicly available knowledge. By adding a nicely written issue you not only make it easier for maintainers to solve your issue, but you are helping the whole community to better understand a certain aspect of the library. ## How to write a good PR 1. Be a chameleon. Understand existing design patterns and syntax and make sure your code additions flow seamlessly into the existing code base. 
Pull requests that significantly diverge from existing design patterns or user interfaces will not be merged. 2. Be laser focused. A pull request should solve one problem and one problem only. Make sure to not fall into the trap of "also fixing another problem while we're adding it". It is much more difficult to review pull requests that solve multiple, unrelated problems at once. 3. If helpful, try to add a code snippet that displays an example of how your addition can be used. 4. The title of your pull request should be a summary of its contribution. 5. If your pull request addresses an issue, please mention the issue number in the pull request description to make sure they are linked (and people consulting the issue know you are working on it); 6. To indicate a work in progress please prefix the title with `[WIP]`. These are useful to avoid duplicated work, and to differentiate it from PRs ready to be merged; 7. Try to formulate and format your text as explained in [How to write a good issue](#how-to-write-a-good-issue). 8. Make sure existing tests pass; 9. Add high-coverage tests. No quality testing = no merge. - If you are adding new `@slow` tests, make sure they pass using `RUN_SLOW=1 python -m pytest tests/test_my_new_model.py`. CircleCI does not run the slow tests, but GitHub actions does every night! 10. All public methods must have informative docstrings that work nicely with markdown. See `[pipeline_latent_diffusion.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py)` for an example. 11. Due to the rapidly growing repository, it is important to make sure that no files that would significantly weigh down the repository are added. This includes images, videos, and other non-text files. We prefer to leverage a hf.co hosted `dataset` like [`hf-internal-testing`](https://huggingface.co/hf-internal-testing) or [huggingface/documentation-images](https://huggingface.co/datasets/huggingface/documentation-images) to place these files. If an external contribution, feel free to add the images to your PR and ask a Hugging Face member to migrate your images to this dataset. ## How to open a PR Before writing code, we strongly advise you to search through the existing PRs or issues to make sure that nobody is already working on the same thing. If you are unsure, it is always a good idea to open an issue to get some feedback. You will need basic `git` proficiency to be able to contribute to 🧨 Diffusers. `git` is not the easiest tool to use but it has the greatest manual. Type `git --help` in a shell and enjoy. If you prefer books, [Pro Git](https://git-scm.com/book/en/v2) is a very good reference. Follow these steps to start contributing ([supported Python versions](https://github.com/huggingface/diffusers/blob/main/setup.py#L244)): 1. Fork the [repository](https://github.com/huggingface/diffusers) by clicking on the 'Fork' button on the repository's page. This creates a copy of the code under your GitHub user account. 2. Clone your fork to your local disk, and add the base repository as a remote: ```bash $ git clone [email protected]:<your Github handle>/diffusers.git $ cd diffusers $ git remote add upstream https://github.com/huggingface/diffusers.git ``` 3. Create a new branch to hold your development changes: ```bash $ git checkout -b a-descriptive-name-for-my-changes ``` **Do not** work on the `main` branch. 4. 
Set up a development environment by running the following command in a virtual environment: ```bash $ pip install -e ".[dev]" ``` If you have already cloned the repo, you might need to `git pull` to get the most recent changes in the library. 5. Develop the features on your branch. As you work on the features, you should make sure that the test suite passes. You should run the tests impacted by your changes like this: ```bash $ pytest tests/<TEST_TO_RUN>.py ``` Before you run the tests, please make sure you install the dependencies required for testing. You can do so with this command: ```bash $ pip install -e ".[test]" ``` You can run the full test suite with the following command, but it takes a beefy machine to produce a result in a decent amount of time now that Diffusers has grown a lot. Here is the command for it: ```bash $ make test ``` 🧨 Diffusers relies on `black` and `isort` to format its source code consistently. After you make changes, apply automatic style corrections and code verifications that can't be automated in one go with: ```bash $ make style ``` 🧨 Diffusers also uses `ruff` and a few custom scripts to check for coding mistakes. Quality control runs in CI, however, you can also run the same checks with: ```bash $ make quality ``` Once you're happy with your changes, add changed files using `git add` and make a commit with `git commit` to record your changes locally: ```bash $ git add modified_file.py $ git commit ``` It is a good idea to sync your copy of the code with the original repository regularly. This way you can quickly account for changes: ```bash $ git pull upstream main ``` Push the changes to your account using: ```bash $ git push -u origin a-descriptive-name-for-my-changes ``` 6. Once you are satisfied, go to the webpage of your fork on GitHub. Click on 'Pull request' to send your changes to the project maintainers for review. 7. It's ok if maintainers ask you for changes. It happens to core contributors too! So everyone can see the changes in the Pull request, work in your local branch and push the changes to your fork. They will automatically appear in the pull request. ### Tests An extensive test suite is included to test the library behavior and several examples. Library tests can be found in the [tests folder](https://github.com/huggingface/diffusers/tree/main/tests). We like `pytest` and `pytest-xdist` because it's faster. From the root of the repository, here's how to run tests with `pytest` for the library: ```bash $ python -m pytest -n auto --dist=loadfile -s -v ./tests/ ``` In fact, that's how `make test` is implemented! You can specify a smaller set of tests in order to test only the feature you're working on. By default, slow tests are skipped. Set the `RUN_SLOW` environment variable to `yes` to run them. This will download many gigabytes of models — make sure you have enough disk space and a good Internet connection, or a lot of patience! ```bash $ RUN_SLOW=yes python -m pytest -n auto --dist=loadfile -s -v ./tests/ ``` `unittest` is fully supported, here's how to run tests with it: ```bash $ python -m unittest discover -s tests -t . -v $ python -m unittest discover -s examples -t examples -v ``` ### Syncing forked main with upstream (HuggingFace) main To avoid pinging the upstream repository which adds reference notes to each upstream PR and sends unnecessary notifications to the developers involved in these PRs, when syncing the main branch of a forked repository, please, follow these steps: 1. 
When possible, avoid syncing with the upstream using a branch and PR on the forked repository. Instead, merge directly into the forked main. 2. If a PR is absolutely necessary, use the following steps after checking out your branch: ``` $ git checkout -b your-branch-for-syncing $ git pull --squash --no-commit upstream main $ git commit -m '<your message without GitHub references>' $ git push --set-upstream origin your-branch-for-syncing ``` ### Style guide For documentation strings, 🧨 Diffusers follows the [Google style](https://google.github.io/styleguide/pyguide.html).
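For illustration, a docstring in that style could look roughly like the following, using the markdown-flavored conventions found across the library's docstrings. The function itself is made up purely for demonstration:

```python
def rescale_noise(noise, scale: float = 1.0):
    """Rescales a noise tensor by a constant factor (illustrative example only).

    Args:
        noise (`torch.Tensor`):
            The noise tensor to rescale.
        scale (`float`, *optional*, defaults to 1.0):
            The multiplicative factor applied to `noise`.

    Returns:
        `torch.Tensor`: The rescaled noise tensor.
    """
    return noise * scale
```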
0
hf_public_repos
hf_public_repos/diffusers/Makefile
.PHONY: deps_table_update modified_only_fixup extra_style_checks quality style fixup fix-copies test test-examples # make sure to test the local checkout in scripts and not the pre-installed one (don't use quotes!) export PYTHONPATH = src check_dirs := examples scripts src tests utils modified_only_fixup: $(eval modified_py_files := $(shell python utils/get_modified_files.py $(check_dirs))) @if test -n "$(modified_py_files)"; then \ echo "Checking/fixing $(modified_py_files)"; \ black $(modified_py_files); \ ruff $(modified_py_files); \ else \ echo "No library .py files were modified"; \ fi # Update src/diffusers/dependency_versions_table.py deps_table_update: @python setup.py deps_table_update deps_table_check_updated: @md5sum src/diffusers/dependency_versions_table.py > md5sum.saved @python setup.py deps_table_update @md5sum -c --quiet md5sum.saved || (printf "\nError: the version dependency table is outdated.\nPlease run 'make fixup' or 'make style' and commit the changes.\n\n" && exit 1) @rm md5sum.saved # autogenerating code autogenerate_code: deps_table_update # Check that the repo is in a good state repo-consistency: python utils/check_dummies.py python utils/check_repo.py python utils/check_inits.py # this target runs checks on all files quality: black --check $(check_dirs) ruff $(check_dirs) doc-builder style src/diffusers docs/source --max_len 119 --check_only --path_to_docs docs/source python utils/check_doc_toc.py # Format source code automatically and check if there are any problems left that need manual fixing extra_style_checks: python utils/custom_init_isort.py doc-builder style src/diffusers docs/source --max_len 119 --path_to_docs docs/source python utils/check_doc_toc.py --fix_and_overwrite # this target runs checks on all files and potentially modifies some of them style: black $(check_dirs) ruff $(check_dirs) --fix ${MAKE} autogenerate_code ${MAKE} extra_style_checks # Super fast fix and check target that only works on relevant modified files since the branch was made fixup: modified_only_fixup extra_style_checks autogenerate_code repo-consistency # Make marked copies of snippets of code conform to the original fix-copies: python utils/check_copies.py --fix_and_overwrite python utils/check_dummies.py --fix_and_overwrite # Run tests for the library test: python -m pytest -n auto --dist=loadfile -s -v ./tests/ # Run tests for examples test-examples: python -m pytest -n auto --dist=loadfile -s -v ./examples/pytorch/ # Release stuff pre-release: python utils/release.py pre-patch: python utils/release.py --patch post-release: python utils/release.py --post_release post-patch: python utils/release.py --post_release --patch
0
hf_public_repos/diffusers
hf_public_repos/diffusers/examples/test_examples.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc.. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import shutil import subprocess import sys import tempfile import unittest from typing import List import torch from accelerate.utils import write_basic_config from diffusers import DiffusionPipeline, UNet2DConditionModel logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() # These utils relate to ensuring the right error message is received when running scripts class SubprocessCallException(Exception): pass def run_command(command: List[str], return_stdout=False): """ Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture if an error occurred while running `command` """ try: output = subprocess.check_output(command, stderr=subprocess.STDOUT) if return_stdout: if hasattr(output, "decode"): output = output.decode("utf-8") return output except subprocess.CalledProcessError as e: raise SubprocessCallException( f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}" ) from e stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class ExamplesTestsAccelerate(unittest.TestCase): @classmethod def setUpClass(cls): super().setUpClass() cls._tmpdir = tempfile.mkdtemp() cls.configPath = os.path.join(cls._tmpdir, "default_config.yml") write_basic_config(save_location=cls.configPath) cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath] @classmethod def tearDownClass(cls): super().tearDownClass() shutil.rmtree(cls._tmpdir) def test_train_unconditional(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/unconditional_image_generation/train_unconditional.py --dataset_name hf-internal-testing/dummy_image_class_data --model_config_name_or_path diffusers/ddpm_dummy --resolution 64 --output_dir {tmpdir} --train_batch_size 2 --num_epochs 1 --gradient_accumulation_steps 1 --ddpm_num_inference_steps 2 --learning_rate 1e-3 --lr_warmup_steps 5 """.split() run_command(self._launch_args + test_args, return_stdout=True) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "unet", "diffusion_pytorch_model.bin"))) self.assertTrue(os.path.isfile(os.path.join(tmpdir, "scheduler", "scheduler_config.json"))) def test_textual_inversion(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/textual_inversion/textual_inversion.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-pipe --train_data_dir docs/source/en/imgs --learnable_property object --placeholder_token <cat-toy> --initializer_token a --validation_prompt <cat-toy> --validation_steps 1 --save_steps 1 --num_vectors 2 --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test 
self.assertTrue(os.path.isfile(os.path.join(tmpdir, "learned_embeds.bin"))) def test_dreambooth(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/dreambooth/train_dreambooth.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-pipe --instance_data_dir docs/source/en/imgs --instance_prompt photo --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "unet", "diffusion_pytorch_model.bin"))) self.assertTrue(os.path.isfile(os.path.join(tmpdir, "scheduler", "scheduler_config.json"))) def test_dreambooth_if(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/dreambooth/train_dreambooth.py --pretrained_model_name_or_path hf-internal-testing/tiny-if-pipe --instance_data_dir docs/source/en/imgs --instance_prompt photo --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --pre_compute_text_embeddings --tokenizer_max_length=77 --text_encoder_use_attention_mask """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "unet", "diffusion_pytorch_model.bin"))) self.assertTrue(os.path.isfile(os.path.join(tmpdir, "scheduler", "scheduler_config.json"))) def test_dreambooth_checkpointing(self): instance_prompt = "photo" pretrained_model_name_or_path = "hf-internal-testing/tiny-stable-diffusion-pipe" with tempfile.TemporaryDirectory() as tmpdir: # Run training script with checkpointing # max_train_steps == 5, checkpointing_steps == 2 # Should create checkpoints at steps 2, 4 initial_run_args = f""" examples/dreambooth/train_dreambooth.py --pretrained_model_name_or_path {pretrained_model_name_or_path} --instance_data_dir docs/source/en/imgs --instance_prompt {instance_prompt} --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 5 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=2 --seed=0 """.split() run_command(self._launch_args + initial_run_args) # check can run the original fully trained output pipeline pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None) pipe(instance_prompt, num_inference_steps=2) # check checkpoint directories exist self.assertTrue(os.path.isdir(os.path.join(tmpdir, "checkpoint-2"))) self.assertTrue(os.path.isdir(os.path.join(tmpdir, "checkpoint-4"))) # check can run an intermediate checkpoint unet = UNet2DConditionModel.from_pretrained(tmpdir, subfolder="checkpoint-2/unet") pipe = DiffusionPipeline.from_pretrained(pretrained_model_name_or_path, unet=unet, safety_checker=None) pipe(instance_prompt, num_inference_steps=2) # Remove checkpoint 2 so that we can check only later checkpoints exist after resuming shutil.rmtree(os.path.join(tmpdir, "checkpoint-2")) # Run training script for 7 total steps resuming from checkpoint 4 resume_run_args = f""" examples/dreambooth/train_dreambooth.py --pretrained_model_name_or_path {pretrained_model_name_or_path} --instance_data_dir docs/source/en/imgs --instance_prompt {instance_prompt} --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 
7 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=2 --resume_from_checkpoint=checkpoint-4 --seed=0 """.split() run_command(self._launch_args + resume_run_args) # check can run new fully trained pipeline pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None) pipe(instance_prompt, num_inference_steps=2) # check old checkpoints do not exist self.assertFalse(os.path.isdir(os.path.join(tmpdir, "checkpoint-2"))) # check new checkpoints exist self.assertTrue(os.path.isdir(os.path.join(tmpdir, "checkpoint-4"))) self.assertTrue(os.path.isdir(os.path.join(tmpdir, "checkpoint-6"))) def test_dreambooth_lora(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/dreambooth/train_dreambooth_lora.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-pipe --instance_data_dir docs/source/en/imgs --instance_prompt photo --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.bin"))) # make sure the state_dict has the correct naming in the parameters. lora_state_dict = torch.load(os.path.join(tmpdir, "pytorch_lora_weights.bin")) is_lora = all("lora" in k for k in lora_state_dict.keys()) self.assertTrue(is_lora) # when not training the text encoder, all the parameters in the state dict should start # with `"unet"` in their names. starts_with_unet = all(key.startswith("unet") for key in lora_state_dict.keys()) self.assertTrue(starts_with_unet) def test_dreambooth_lora_with_text_encoder(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/dreambooth/train_dreambooth_lora.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-pipe --instance_data_dir docs/source/en/imgs --instance_prompt photo --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --train_text_encoder --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.bin"))) # check `text_encoder` is present at all. lora_state_dict = torch.load(os.path.join(tmpdir, "pytorch_lora_weights.bin")) keys = lora_state_dict.keys() is_text_encoder_present = any(k.startswith("text_encoder") for k in keys) self.assertTrue(is_text_encoder_present) # the names of the keys of the state dict should either start with `unet` # or `text_encoder`. 
is_correct_naming = all(k.startswith("unet") or k.startswith("text_encoder") for k in keys) self.assertTrue(is_correct_naming) def test_dreambooth_lora_if_model(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/dreambooth/train_dreambooth_lora.py --pretrained_model_name_or_path hf-internal-testing/tiny-if-pipe --instance_data_dir docs/source/en/imgs --instance_prompt photo --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --pre_compute_text_embeddings --tokenizer_max_length=77 --text_encoder_use_attention_mask """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.bin"))) # make sure the state_dict has the correct naming in the parameters. lora_state_dict = torch.load(os.path.join(tmpdir, "pytorch_lora_weights.bin")) is_lora = all("lora" in k for k in lora_state_dict.keys()) self.assertTrue(is_lora) # when not training the text encoder, all the parameters in the state dict should start # with `"unet"` in their names. starts_with_unet = all(key.startswith("unet") for key in lora_state_dict.keys()) self.assertTrue(starts_with_unet) def test_dreambooth_lora_sdxl(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/dreambooth/train_dreambooth_lora_sdxl.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-xl-pipe --instance_data_dir docs/source/en/imgs --instance_prompt photo --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.bin"))) # make sure the state_dict has the correct naming in the parameters. lora_state_dict = torch.load(os.path.join(tmpdir, "pytorch_lora_weights.bin")) is_lora = all("lora" in k for k in lora_state_dict.keys()) self.assertTrue(is_lora) # when not training the text encoder, all the parameters in the state dict should start # with `"unet"` in their names. starts_with_unet = all(key.startswith("unet") for key in lora_state_dict.keys()) self.assertTrue(starts_with_unet) def test_dreambooth_lora_sdxl_with_text_encoder(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/dreambooth/train_dreambooth_lora_sdxl.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-xl-pipe --instance_data_dir docs/source/en/imgs --instance_prompt photo --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --train_text_encoder """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.bin"))) # make sure the state_dict has the correct naming in the parameters. lora_state_dict = torch.load(os.path.join(tmpdir, "pytorch_lora_weights.bin")) is_lora = all("lora" in k for k in lora_state_dict.keys()) self.assertTrue(is_lora) # when not training the text encoder, all the parameters in the state dict should start # with `"unet"` or `"text_encoder"` or `"text_encoder_2"` in their names. 
keys = lora_state_dict.keys() starts_with_unet = all( k.startswith("unet") or k.startswith("text_encoder") or k.startswith("text_encoder_2") for k in keys ) self.assertTrue(starts_with_unet) def test_custom_diffusion(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/custom_diffusion/train_custom_diffusion.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-pipe --instance_data_dir docs/source/en/imgs --instance_prompt <new1> --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 1.0e-05 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --modifier_token <new1> --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_custom_diffusion_weights.bin"))) self.assertTrue(os.path.isfile(os.path.join(tmpdir, "<new1>.bin"))) def test_text_to_image(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/text_to_image/train_text_to_image.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-pipe --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --center_crop --random_flip --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "unet", "diffusion_pytorch_model.bin"))) self.assertTrue(os.path.isfile(os.path.join(tmpdir, "scheduler", "scheduler_config.json"))) def test_text_to_image_checkpointing(self): pretrained_model_name_or_path = "hf-internal-testing/tiny-stable-diffusion-pipe" prompt = "a prompt" with tempfile.TemporaryDirectory() as tmpdir: # Run training script with checkpointing # max_train_steps == 5, checkpointing_steps == 2 # Should create checkpoints at steps 2, 4 initial_run_args = f""" examples/text_to_image/train_text_to_image.py --pretrained_model_name_or_path {pretrained_model_name_or_path} --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --center_crop --random_flip --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 5 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=2 --seed=0 """.split() run_command(self._launch_args + initial_run_args) pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None) pipe(prompt, num_inference_steps=2) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-4"}, ) # check can run an intermediate checkpoint unet = UNet2DConditionModel.from_pretrained(tmpdir, subfolder="checkpoint-2/unet") pipe = DiffusionPipeline.from_pretrained(pretrained_model_name_or_path, unet=unet, safety_checker=None) pipe(prompt, num_inference_steps=2) # Remove checkpoint 2 so that we can check only later checkpoints exist after resuming shutil.rmtree(os.path.join(tmpdir, "checkpoint-2")) # Run training script for 7 total steps resuming from checkpoint 4 resume_run_args = f""" examples/text_to_image/train_text_to_image.py --pretrained_model_name_or_path {pretrained_model_name_or_path} --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --center_crop --random_flip --train_batch_size 1 --gradient_accumulation_steps 1 
--max_train_steps 7 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=2 --resume_from_checkpoint=checkpoint-4 --seed=0 """.split() run_command(self._launch_args + resume_run_args) # check can run new fully trained pipeline pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None) pipe(prompt, num_inference_steps=2) self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, { # no checkpoint-2 -> check old checkpoints do not exist # check new checkpoints exist "checkpoint-4", "checkpoint-6", }, ) def test_text_to_image_checkpointing_use_ema(self): pretrained_model_name_or_path = "hf-internal-testing/tiny-stable-diffusion-pipe" prompt = "a prompt" with tempfile.TemporaryDirectory() as tmpdir: # Run training script with checkpointing # max_train_steps == 5, checkpointing_steps == 2 # Should create checkpoints at steps 2, 4 initial_run_args = f""" examples/text_to_image/train_text_to_image.py --pretrained_model_name_or_path {pretrained_model_name_or_path} --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --center_crop --random_flip --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 5 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=2 --use_ema --seed=0 """.split() run_command(self._launch_args + initial_run_args) pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None) pipe(prompt, num_inference_steps=2) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-4"}, ) # check can run an intermediate checkpoint unet = UNet2DConditionModel.from_pretrained(tmpdir, subfolder="checkpoint-2/unet") pipe = DiffusionPipeline.from_pretrained(pretrained_model_name_or_path, unet=unet, safety_checker=None) pipe(prompt, num_inference_steps=2) # Remove checkpoint 2 so that we can check only later checkpoints exist after resuming shutil.rmtree(os.path.join(tmpdir, "checkpoint-2")) # Run training script for 7 total steps resuming from checkpoint 4 resume_run_args = f""" examples/text_to_image/train_text_to_image.py --pretrained_model_name_or_path {pretrained_model_name_or_path} --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --center_crop --random_flip --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 7 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=2 --resume_from_checkpoint=checkpoint-4 --use_ema --seed=0 """.split() run_command(self._launch_args + resume_run_args) # check can run new fully trained pipeline pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None) pipe(prompt, num_inference_steps=2) self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, { # no checkpoint-2 -> check old checkpoints do not exist # check new checkpoints exist "checkpoint-4", "checkpoint-6", }, ) def test_text_to_image_checkpointing_checkpoints_total_limit(self): pretrained_model_name_or_path = "hf-internal-testing/tiny-stable-diffusion-pipe" prompt = "a prompt" with tempfile.TemporaryDirectory() as tmpdir: # Run training script with checkpointing # max_train_steps == 7, checkpointing_steps == 2, checkpoints_total_limit == 2 # Should create checkpoints at steps 2, 4, 6 # with checkpoint at step 2 deleted initial_run_args = f""" examples/text_to_image/train_text_to_image.py 
--pretrained_model_name_or_path {pretrained_model_name_or_path} --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --center_crop --random_flip --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 7 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=2 --checkpoints_total_limit=2 --seed=0 """.split() run_command(self._launch_args + initial_run_args) pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None) pipe(prompt, num_inference_steps=2) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, # checkpoint-2 should have been deleted {"checkpoint-4", "checkpoint-6"}, ) def test_text_to_image_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self): pretrained_model_name_or_path = "hf-internal-testing/tiny-stable-diffusion-pipe" prompt = "a prompt" with tempfile.TemporaryDirectory() as tmpdir: # Run training script with checkpointing # max_train_steps == 9, checkpointing_steps == 2 # Should create checkpoints at steps 2, 4, 6, 8 initial_run_args = f""" examples/text_to_image/train_text_to_image.py --pretrained_model_name_or_path {pretrained_model_name_or_path} --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --center_crop --random_flip --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 9 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=2 --seed=0 """.split() run_command(self._launch_args + initial_run_args) pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None) pipe(prompt, num_inference_steps=2) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-4", "checkpoint-6", "checkpoint-8"}, ) # resume and we should try to checkpoint at 10, where we'll have to remove # checkpoint-2 and checkpoint-4 instead of just a single previous checkpoint resume_run_args = f""" examples/text_to_image/train_text_to_image.py --pretrained_model_name_or_path {pretrained_model_name_or_path} --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --center_crop --random_flip --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 11 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=2 --resume_from_checkpoint=checkpoint-8 --checkpoints_total_limit=3 --seed=0 """.split() run_command(self._launch_args + resume_run_args) pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None) pipe(prompt, num_inference_steps=2) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-6", "checkpoint-8", "checkpoint-10"}, ) def test_text_to_image_lora_checkpointing_checkpoints_total_limit(self): pretrained_model_name_or_path = "hf-internal-testing/tiny-stable-diffusion-pipe" prompt = "a prompt" with tempfile.TemporaryDirectory() as tmpdir: # Run training script with checkpointing # max_train_steps == 7, checkpointing_steps == 2, checkpoints_total_limit == 2 # Should create checkpoints at steps 2, 4, 6 # with checkpoint at step 2 deleted initial_run_args = f""" examples/text_to_image/train_text_to_image_lora.py --pretrained_model_name_or_path {pretrained_model_name_or_path} --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 
--center_crop --random_flip --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 7 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=2 --checkpoints_total_limit=2 --seed=0 --num_validation_images=0 """.split() run_command(self._launch_args + initial_run_args) pipe = DiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None ) pipe.load_lora_weights(tmpdir) pipe(prompt, num_inference_steps=2) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, # checkpoint-2 should have been deleted {"checkpoint-4", "checkpoint-6"}, ) def test_text_to_image_lora_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self): pretrained_model_name_or_path = "hf-internal-testing/tiny-stable-diffusion-pipe" prompt = "a prompt" with tempfile.TemporaryDirectory() as tmpdir: # Run training script with checkpointing # max_train_steps == 9, checkpointing_steps == 2 # Should create checkpoints at steps 2, 4, 6, 8 initial_run_args = f""" examples/text_to_image/train_text_to_image_lora.py --pretrained_model_name_or_path {pretrained_model_name_or_path} --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --center_crop --random_flip --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 9 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=2 --seed=0 --num_validation_images=0 """.split() run_command(self._launch_args + initial_run_args) pipe = DiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None ) pipe.load_lora_weights(tmpdir) pipe(prompt, num_inference_steps=2) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-4", "checkpoint-6", "checkpoint-8"}, ) # resume and we should try to checkpoint at 10, where we'll have to remove # checkpoint-2 and checkpoint-4 instead of just a single previous checkpoint resume_run_args = f""" examples/text_to_image/train_text_to_image_lora.py --pretrained_model_name_or_path {pretrained_model_name_or_path} --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --center_crop --random_flip --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 11 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=2 --resume_from_checkpoint=checkpoint-8 --checkpoints_total_limit=3 --seed=0 --num_validation_images=0 """.split() run_command(self._launch_args + resume_run_args) pipe = DiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None ) pipe.load_lora_weights(tmpdir) pipe(prompt, num_inference_steps=2) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-6", "checkpoint-8", "checkpoint-10"}, ) def test_unconditional_checkpointing_checkpoints_total_limit(self): with tempfile.TemporaryDirectory() as tmpdir: initial_run_args = f""" examples/unconditional_image_generation/train_unconditional.py --dataset_name hf-internal-testing/dummy_image_class_data --model_config_name_or_path diffusers/ddpm_dummy --resolution 64 --output_dir {tmpdir} --train_batch_size 1 --num_epochs 1 --gradient_accumulation_steps 1 --ddpm_num_inference_steps 2 --learning_rate 
1e-3 --lr_warmup_steps 5 --checkpointing_steps=2 --checkpoints_total_limit=2 """.split() run_command(self._launch_args + initial_run_args) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, # checkpoint-2 should have been deleted {"checkpoint-4", "checkpoint-6"}, ) def test_unconditional_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self): with tempfile.TemporaryDirectory() as tmpdir: initial_run_args = f""" examples/unconditional_image_generation/train_unconditional.py --dataset_name hf-internal-testing/dummy_image_class_data --model_config_name_or_path diffusers/ddpm_dummy --resolution 64 --output_dir {tmpdir} --train_batch_size 1 --num_epochs 1 --gradient_accumulation_steps 1 --ddpm_num_inference_steps 2 --learning_rate 1e-3 --lr_warmup_steps 5 --checkpointing_steps=1 """.split() run_command(self._launch_args + initial_run_args) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-1", "checkpoint-2", "checkpoint-3", "checkpoint-4", "checkpoint-5", "checkpoint-6"}, ) resume_run_args = f""" examples/unconditional_image_generation/train_unconditional.py --dataset_name hf-internal-testing/dummy_image_class_data --model_config_name_or_path diffusers/ddpm_dummy --resolution 64 --output_dir {tmpdir} --train_batch_size 1 --num_epochs 2 --gradient_accumulation_steps 1 --ddpm_num_inference_steps 2 --learning_rate 1e-3 --lr_warmup_steps 5 --resume_from_checkpoint=checkpoint-6 --checkpointing_steps=2 --checkpoints_total_limit=3 """.split() run_command(self._launch_args + resume_run_args) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-8", "checkpoint-10", "checkpoint-12"}, ) def test_textual_inversion_checkpointing(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/textual_inversion/textual_inversion.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-pipe --train_data_dir docs/source/en/imgs --learnable_property object --placeholder_token <cat-toy> --initializer_token a --validation_prompt <cat-toy> --validation_steps 1 --save_steps 1 --num_vectors 2 --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 3 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=1 --checkpoints_total_limit=2 """.split() run_command(self._launch_args + test_args) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-3"}, ) def test_textual_inversion_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/textual_inversion/textual_inversion.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-pipe --train_data_dir docs/source/en/imgs --learnable_property object --placeholder_token <cat-toy> --initializer_token a --validation_prompt <cat-toy> --validation_steps 1 --save_steps 1 --num_vectors 2 --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 3 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=1 """.split() run_command(self._launch_args + test_args) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" 
in x}, {"checkpoint-1", "checkpoint-2", "checkpoint-3"}, ) resume_run_args = f""" examples/textual_inversion/textual_inversion.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-pipe --train_data_dir docs/source/en/imgs --learnable_property object --placeholder_token <cat-toy> --initializer_token a --validation_prompt <cat-toy> --validation_steps 1 --save_steps 1 --num_vectors 2 --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 4 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=1 --resume_from_checkpoint=checkpoint-3 --checkpoints_total_limit=2 """.split() run_command(self._launch_args + resume_run_args) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-3", "checkpoint-4"}, ) def test_instruct_pix2pix_checkpointing_checkpoints_total_limit(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/instruct_pix2pix/train_instruct_pix2pix.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe --dataset_name=hf-internal-testing/instructpix2pix-10-samples --resolution=64 --random_flip --train_batch_size=1 --max_train_steps=7 --checkpointing_steps=2 --checkpoints_total_limit=2 --output_dir {tmpdir} --seed=0 """.split() run_command(self._launch_args + test_args) self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"}, ) def test_instruct_pix2pix_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/instruct_pix2pix/train_instruct_pix2pix.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe --dataset_name=hf-internal-testing/instructpix2pix-10-samples --resolution=64 --random_flip --train_batch_size=1 --max_train_steps=9 --checkpointing_steps=2 --output_dir {tmpdir} --seed=0 """.split() run_command(self._launch_args + test_args) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-4", "checkpoint-6", "checkpoint-8"}, ) resume_run_args = f""" examples/instruct_pix2pix/train_instruct_pix2pix.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe --dataset_name=hf-internal-testing/instructpix2pix-10-samples --resolution=64 --random_flip --train_batch_size=1 --max_train_steps=11 --checkpointing_steps=2 --output_dir {tmpdir} --seed=0 --resume_from_checkpoint=checkpoint-8 --checkpoints_total_limit=3 """.split() run_command(self._launch_args + resume_run_args) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-6", "checkpoint-8", "checkpoint-10"}, ) def test_dreambooth_checkpointing_checkpoints_total_limit(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/dreambooth/train_dreambooth.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe --instance_data_dir=docs/source/en/imgs --output_dir={tmpdir} --instance_prompt=prompt --resolution=64 --train_batch_size=1 --gradient_accumulation_steps=1 --max_train_steps=6 --checkpoints_total_limit=2 --checkpointing_steps=2 """.split() run_command(self._launch_args + test_args) self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"}, ) def 
test_dreambooth_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/dreambooth/train_dreambooth.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe --instance_data_dir=docs/source/en/imgs --output_dir={tmpdir} --instance_prompt=prompt --resolution=64 --train_batch_size=1 --gradient_accumulation_steps=1 --max_train_steps=9 --checkpointing_steps=2 """.split() run_command(self._launch_args + test_args) self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-4", "checkpoint-6", "checkpoint-8"}, ) resume_run_args = f""" examples/dreambooth/train_dreambooth.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe --instance_data_dir=docs/source/en/imgs --output_dir={tmpdir} --instance_prompt=prompt --resolution=64 --train_batch_size=1 --gradient_accumulation_steps=1 --max_train_steps=11 --checkpointing_steps=2 --resume_from_checkpoint=checkpoint-8 --checkpoints_total_limit=3 """.split() run_command(self._launch_args + resume_run_args) self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-6", "checkpoint-8", "checkpoint-10"}, ) def test_dreambooth_lora_checkpointing_checkpoints_total_limit(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/dreambooth/train_dreambooth_lora.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe --instance_data_dir=docs/source/en/imgs --output_dir={tmpdir} --instance_prompt=prompt --resolution=64 --train_batch_size=1 --gradient_accumulation_steps=1 --max_train_steps=6 --checkpoints_total_limit=2 --checkpointing_steps=2 """.split() run_command(self._launch_args + test_args) self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"}, ) def test_dreambooth_lora_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/dreambooth/train_dreambooth_lora.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe --instance_data_dir=docs/source/en/imgs --output_dir={tmpdir} --instance_prompt=prompt --resolution=64 --train_batch_size=1 --gradient_accumulation_steps=1 --max_train_steps=9 --checkpointing_steps=2 """.split() run_command(self._launch_args + test_args) self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-4", "checkpoint-6", "checkpoint-8"}, ) resume_run_args = f""" examples/dreambooth/train_dreambooth_lora.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe --instance_data_dir=docs/source/en/imgs --output_dir={tmpdir} --instance_prompt=prompt --resolution=64 --train_batch_size=1 --gradient_accumulation_steps=1 --max_train_steps=11 --checkpointing_steps=2 --resume_from_checkpoint=checkpoint-8 --checkpoints_total_limit=3 """.split() run_command(self._launch_args + resume_run_args) self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-6", "checkpoint-8", "checkpoint-10"}, ) def test_controlnet_checkpointing_checkpoints_total_limit(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/controlnet/train_controlnet.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe --dataset_name=hf-internal-testing/fill10 --output_dir={tmpdir} --resolution=64 --train_batch_size=1 
--gradient_accumulation_steps=1 --max_train_steps=6 --checkpoints_total_limit=2 --checkpointing_steps=2 --controlnet_model_name_or_path=hf-internal-testing/tiny-controlnet """.split() run_command(self._launch_args + test_args) self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"}, ) def test_controlnet_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/controlnet/train_controlnet.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe --dataset_name=hf-internal-testing/fill10 --output_dir={tmpdir} --resolution=64 --train_batch_size=1 --gradient_accumulation_steps=1 --controlnet_model_name_or_path=hf-internal-testing/tiny-controlnet --max_train_steps=9 --checkpointing_steps=2 """.split() run_command(self._launch_args + test_args) self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-4", "checkpoint-6", "checkpoint-8"}, ) resume_run_args = f""" examples/controlnet/train_controlnet.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe --dataset_name=hf-internal-testing/fill10 --output_dir={tmpdir} --resolution=64 --train_batch_size=1 --gradient_accumulation_steps=1 --controlnet_model_name_or_path=hf-internal-testing/tiny-controlnet --max_train_steps=11 --checkpointing_steps=2 --resume_from_checkpoint=checkpoint-8 --checkpoints_total_limit=3 """.split() run_command(self._launch_args + resume_run_args) self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-8", "checkpoint-10", "checkpoint-12"}, ) def test_controlnet_sdxl(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/controlnet/train_controlnet_sdxl.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-xl-pipe --dataset_name=hf-internal-testing/fill10 --output_dir={tmpdir} --resolution=64 --train_batch_size=1 --gradient_accumulation_steps=1 --controlnet_model_name_or_path=hf-internal-testing/tiny-controlnet-sdxl --max_train_steps=9 --checkpointing_steps=2 """.split() run_command(self._launch_args + test_args) self.assertTrue(os.path.isfile(os.path.join(tmpdir, "diffusion_pytorch_model.bin"))) def test_custom_diffusion_checkpointing_checkpoints_total_limit(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/custom_diffusion/train_custom_diffusion.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe --instance_data_dir=docs/source/en/imgs --output_dir={tmpdir} --instance_prompt=<new1> --resolution=64 --train_batch_size=1 --modifier_token=<new1> --dataloader_num_workers=0 --max_train_steps=6 --checkpoints_total_limit=2 --checkpointing_steps=2 """.split() run_command(self._launch_args + test_args) self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"}, ) def test_custom_diffusion_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/custom_diffusion/train_custom_diffusion.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe --instance_data_dir=docs/source/en/imgs --output_dir={tmpdir} --instance_prompt=<new1> --resolution=64 --train_batch_size=1 --modifier_token=<new1> --dataloader_num_workers=0 --max_train_steps=9 --checkpointing_steps=2 """.split() run_command(self._launch_args + test_args) 
self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-4", "checkpoint-6", "checkpoint-8"}, ) resume_run_args = f""" examples/custom_diffusion/train_custom_diffusion.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe --instance_data_dir=docs/source/en/imgs --output_dir={tmpdir} --instance_prompt=<new1> --resolution=64 --train_batch_size=1 --modifier_token=<new1> --dataloader_num_workers=0 --max_train_steps=11 --checkpointing_steps=2 --resume_from_checkpoint=checkpoint-8 --checkpoints_total_limit=3 """.split() run_command(self._launch_args + resume_run_args) self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-6", "checkpoint-8", "checkpoint-10"}, )
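# Illustrative sketch (not part of the test file above): every checkpointing test repeats the
# same launch-then-assert shape. A minimal standalone version of that pattern, using only the
# standard library; the tests themselves go through `run_command(self._launch_args + args)` and
# `self.assertEqual`, and the helper name below is hypothetical.
import os
import subprocess


def train_and_list_checkpoints(launch_args, script_args, output_dir):
    """Launch one example training run and return the names of the checkpoint-* directories it produced."""
    subprocess.run(launch_args + script_args, check=True)
    return {name for name in os.listdir(output_dir) if "checkpoint" in name}


# Typical assertion shape from the tests above:
#     self.assertEqual(train_and_list_checkpoints(self._launch_args, test_args, tmpdir), {"checkpoint-4", "checkpoint-6"})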
0
hf_public_repos/diffusers
hf_public_repos/diffusers/examples/README.md
<!---
Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# 🧨 Diffusers Examples

Diffusers examples are a collection of scripts to demonstrate how to effectively use the `diffusers` library for a variety of use cases involving training or fine-tuning.

**Note**: If you are looking for **official** examples on how to use `diffusers` for inference, please have a look at [src/diffusers/pipelines](https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines)

Our examples aspire to be **self-contained**, **easy-to-tweak**, **beginner-friendly** and for **one-purpose-only**. More specifically, this means:

- **Self-contained**: An example script shall only depend on "pip-install-able" Python packages that can be found in a `requirements.txt` file. Example scripts shall **not** depend on any local files. This means that one can simply download an example script, *e.g.* [train_unconditional.py](https://github.com/huggingface/diffusers/blob/main/examples/unconditional_image_generation/train_unconditional.py), install the required dependencies, *e.g.* [requirements.txt](https://github.com/huggingface/diffusers/blob/main/examples/unconditional_image_generation/requirements.txt) and execute the example script.
- **Easy-to-tweak**: While we strive to present as many use cases as possible, the example scripts are just that - examples. It is expected that they won't work out-of-the-box on your specific problem and that you will be required to change a few lines of code to adapt them to your needs. To help you with that, most of the examples fully expose the preprocessing of the data and the training loop to allow you to tweak and edit them as required.
- **Beginner-friendly**: We do not aim for providing state-of-the-art training scripts for the newest models, but rather examples that can be used as a way to better understand diffusion models and how to use them with the `diffusers` library. We often purposefully leave out certain state-of-the-art methods if we consider them too complex for beginners.
- **One-purpose-only**: Examples should show one task and one task only. Even if a task is from a modeling point of view very similar, *e.g.* image super-resolution and image modification tend to use the same model and training method, we want examples to showcase only one task to keep them as readable and easy-to-understand as possible.

We provide **official** examples that cover the most popular tasks of diffusion models. *Official* examples are **actively** maintained by the `diffusers` maintainers and we try to rigorously follow our example philosophy as defined above. If you feel like another important example should exist, we are more than happy to welcome a [Feature Request](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feature_request.md&title=) or directly a [Pull Request](https://github.com/huggingface/diffusers/compare) from you!

Training examples show how to pretrain or fine-tune diffusion models for a variety of tasks.
Currently we support:

| Task | 🤗 Accelerate | 🤗 Datasets | Colab |
|---|---|:---:|:---:|
| [**Unconditional Image Generation**](./unconditional_image_generation) | ✅ | ✅ | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb) |
| [**Text-to-Image fine-tuning**](./text_to_image) | ✅ | ✅ | |
| [**Textual Inversion**](./textual_inversion) | ✅ | - | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb) |
| [**Dreambooth**](./dreambooth) | ✅ | - | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb) |
| [**ControlNet**](./controlnet) | ✅ | ✅ | - |
| [**InstructPix2Pix**](./instruct_pix2pix) | ✅ | ✅ | - |
| [**Reinforcement Learning for Control**](https://github.com/huggingface/diffusers/blob/main/examples/reinforcement_learning/run_diffusers_locomotion.py) | - | - | coming soon |

## Community

In addition, we provide **community** examples, which are examples added and maintained by our community.
Community examples can consist of both *training* examples and *inference* pipelines.
For such examples, we are more lenient regarding the philosophy defined above and also cannot guarantee to provide maintenance for every issue.
Examples that are useful for the community, but are either not yet deemed popular or not yet following our above philosophy should go into the [community examples](https://github.com/huggingface/diffusers/tree/main/examples/community) folder. The community folder therefore includes training examples and inference pipelines.
**Note**: Community examples can be a [great first contribution](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) to show to the community how you like to use `diffusers` 🪄.

## Research Projects

We also provide **research_projects** examples that are maintained by the community as defined in the respective research project folders. These examples are useful and offer extended capabilities which are complementary to the official examples. You may refer to [research_projects](https://github.com/huggingface/diffusers/tree/main/examples/research_projects) for details.

## Important note

To make sure you can successfully run the latest versions of the example scripts, you have to **install the library from source** and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:

```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install .
```

Then cd in the example folder of your choice and run

```bash
pip install -r requirements.txt
```
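Once any of the training examples has finished, the resulting pipeline can be loaded back for inference straight from its `--output_dir`. A minimal sketch, assuming a completed run (the example tests in this repository use exactly this `DiffusionPipeline.from_pretrained` pattern; the output path is a placeholder):

```python
from diffusers import DiffusionPipeline

# "path/to/output_dir" is a placeholder for the --output_dir used during training.
pipe = DiffusionPipeline.from_pretrained("path/to/output_dir", safety_checker=None)
image = pipe("a prompt", num_inference_steps=25).images[0]
image.save("sample.png")
```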
0
hf_public_repos/diffusers
hf_public_repos/diffusers/examples/conftest.py
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
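# Illustrative sketch (not part of conftest.py): the two hooks above only wire in shared option
# handling and report generation. Assuming this conftest is picked up (i.e. pytest is invoked on
# the `examples` directory), the reporting path can be exercised programmatically as below; the
# exact report contents are produced by `pytest_terminal_summary_main` in diffusers' testing utilities.
import sys

import pytest

if __name__ == "__main__":
    # Equivalent to: pytest examples -k "checkpointing" --make-reports=examples
    sys.exit(pytest.main(["examples", "-k", "checkpointing", "--make-reports=examples"]))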
0
hf_public_repos/diffusers/examples
hf_public_repos/diffusers/examples/research_projects/README.md
# Research projects

This folder contains various research projects using 🧨 Diffusers.
They are not really maintained by the core maintainers of this library and often require a specific version of Diffusers that is indicated in the requirements file of each folder.
Updating them to the most recent version of the library will require some work.

To use any of them, just run the command

```
pip install -r requirements.txt
```

inside the folder of your choice.

If you need help with any of those, please open an issue where you directly ping the author(s), as indicated at the top of the README of each folder.
0
hf_public_repos/diffusers/examples/research_projects
hf_public_repos/diffusers/examples/research_projects/intel_opts/README.md
## Diffusers examples with Intel optimizations

**This research project is not actively maintained by the diffusers team. For any questions or comments, please make sure to tag @hshen14 .**

This aims to provide diffusers examples with Intel optimizations such as Bfloat16 for training/fine-tuning acceleration and 8-bit integer (INT8) for inference acceleration on Intel platforms.

## Accelerating the fine-tuning for textual inversion

We accelerate the fine-tuning for textual inversion with Intel Extension for PyTorch. The [examples](textual_inversion) enable both single node and multi-node distributed training with Bfloat16 support on Intel Xeon Scalable Processors.

## Accelerating the inference for Stable Diffusion using Bfloat16

We start the inference acceleration with Bfloat16 using Intel Extension for PyTorch. The [script](inference_bf16.py) is generally designed to support standard Stable Diffusion models with Bfloat16 support.

```bash
pip install diffusers transformers accelerate scipy safetensors

export KMP_BLOCKTIME=1
export KMP_SETTINGS=1
export KMP_AFFINITY=granularity=fine,compact,1,0

# Intel OpenMP
export OMP_NUM_THREADS=< Cores to use >
export LD_PRELOAD=${LD_PRELOAD}:/path/to/lib/libiomp5.so
# Jemalloc is a recommended malloc implementation that emphasizes fragmentation avoidance and scalable concurrency support.
export LD_PRELOAD=${LD_PRELOAD}:/path/to/lib/libjemalloc.so
export MALLOC_CONF="oversize_threshold:1,background_thread:true,metadata_thp:auto,dirty_decay_ms:-1,muzzy_decay_ms:9000000000"

# Launch with default DDIM
numactl --membind <node N> -C <cpu list> python inference_bf16.py
# Launch with DPMSolverMultistepScheduler
numactl --membind <node N> -C <cpu list> python inference_bf16.py --dpm
```

## Accelerating the inference for Stable Diffusion using INT8

Coming soon ...
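Reduced to its essentials, the Bfloat16 path in that script optimizes each sub-module with IPEX and runs inference under CPU autocast. A minimal sketch (the full `inference_bf16.py` additionally handles channels-last conversion, the safety checker and the optional DPMSolver scheduler; the model path is a placeholder):

```python
import intel_extension_for_pytorch as ipex
import torch

from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("path-to-your-trained-model").to("cpu")

# optimize the heavy sub-modules for bfloat16 inference
pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe("a photo of a <dicoo>").images[0]
image.save("generated.png")
```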
0
hf_public_repos/diffusers/examples/research_projects
hf_public_repos/diffusers/examples/research_projects/intel_opts/inference_bf16.py
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline


parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
0
hf_public_repos/diffusers/examples/research_projects/intel_opts
hf_public_repos/diffusers/examples/research_projects/intel_opts/textual_inversion/requirements.txt
accelerate>=0.16.0
torchvision
transformers>=4.21.0
ftfy
tensorboard
Jinja2
intel_extension_for_pytorch>=1.13
0
hf_public_repos/diffusers/examples/research_projects/intel_opts
hf_public_repos/diffusers/examples/research_projects/intel_opts/textual_inversion/README.md
## Textual Inversion fine-tuning example

[Textual inversion](https://arxiv.org/abs/2208.01618) is a method to personalize text2image models like stable diffusion on your own images using just 3-5 examples.
The `textual_inversion.py` script shows how to implement the training procedure and adapt it for stable diffusion.

## Training with Intel Extension for PyTorch

Intel Extension for PyTorch provides optimizations for faster training and inference on CPUs. You can leverage the training example "textual_inversion.py". Follow the [instructions](https://github.com/huggingface/diffusers/tree/main/examples/textual_inversion) to get the model and [dataset](https://huggingface.co/sd-concepts-library/dicoo2) before running the script.

The example supports both single node and multi-node distributed training:

### Single node training

```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export DATA_DIR="path-to-dir-containing-dicoo-images"

python textual_inversion.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --train_data_dir=$DATA_DIR \
  --learnable_property="object" \
  --placeholder_token="<dicoo>" --initializer_token="toy" \
  --seed=7 \
  --resolution=512 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=1 \
  --max_train_steps=3000 \
  --learning_rate=2.5e-03 --scale_lr \
  --output_dir="textual_inversion_dicoo"
```

Note: Bfloat16 is available on Intel Xeon Scalable Processors Cooper Lake or Sapphire Rapids. You may not get a performance speedup without Bfloat16 support.

### Multi-node distributed training

Before running the scripts, make sure to install the library's training dependencies successfully:

```bash
python -m pip install oneccl_bind_pt==1.13 -f https://developer.intel.com/ipex-whl-stable-cpu
```

```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export DATA_DIR="path-to-dir-containing-dicoo-images"

oneccl_bindings_for_pytorch_path=$(python -c "from oneccl_bindings_for_pytorch import cwd; print(cwd)")
source $oneccl_bindings_for_pytorch_path/env/setvars.sh

python -m intel_extension_for_pytorch.cpu.launch --distributed \
  --hostfile hostfile --nnodes 2 --nproc_per_node 2 textual_inversion.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --train_data_dir=$DATA_DIR \
  --learnable_property="object" \
  --placeholder_token="<dicoo>" --initializer_token="toy" \
  --seed=7 \
  --resolution=512 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=1 \
  --max_train_steps=750 \
  --learning_rate=2.5e-03 --scale_lr \
  --output_dir="textual_inversion_dicoo"
```

The above is a simple distributed training usage on 2 nodes with 2 processes on each node. Add the right hostname or ip address in the "hostfile" and make sure these 2 nodes are reachable from each other. For more details, please refer to the [user guide](https://github.com/intel/torch-ccl).

### Reference

We publish a [Medium blog](https://medium.com/intel-analytics-software/personalized-stable-diffusion-with-few-shot-fine-tuning-on-a-single-cpu-f01a3316b13) on how to create your own Stable Diffusion model on CPUs using textual inversion. Try it out now, if you are interested.
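After training, the fine-tuned pipeline saved to `--output_dir` can be prompted with the learned placeholder token. A minimal sketch, assuming the single node command above completed and wrote `textual_inversion_dicoo`:

```python
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("textual_inversion_dicoo")
image = pipe("a photo of a <dicoo> on the beach", num_inference_steps=50).images[0]
image.save("dicoo.png")
```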
0
hf_public_repos/diffusers/examples/research_projects/intel_opts
hf_public_repos/diffusers/examples/research_projects/intel_opts/textual_inversion/textual_inversion_bf16.py
import argparse import itertools import math import os import random from pathlib import Path import intel_extension_for_pytorch as ipex import numpy as np import PIL import torch import torch.nn.functional as F import torch.utils.checkpoint from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import ProjectConfiguration, set_seed from huggingface_hub import create_repo, upload_folder # TODO: remove and import from diffusers.utils when the new version of diffusers is released from packaging import version from PIL import Image from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDPMScheduler, PNDMScheduler, StableDiffusionPipeline, UNet2DConditionModel from diffusers.optimization import get_scheduler from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker from diffusers.utils import check_min_version if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"): PIL_INTERPOLATION = { "linear": PIL.Image.Resampling.BILINEAR, "bilinear": PIL.Image.Resampling.BILINEAR, "bicubic": PIL.Image.Resampling.BICUBIC, "lanczos": PIL.Image.Resampling.LANCZOS, "nearest": PIL.Image.Resampling.NEAREST, } else: PIL_INTERPOLATION = { "linear": PIL.Image.LINEAR, "bilinear": PIL.Image.BILINEAR, "bicubic": PIL.Image.BICUBIC, "lanczos": PIL.Image.LANCZOS, "nearest": PIL.Image.NEAREST, } # ------------------------------------------------------------------------------ # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.13.0.dev0") logger = get_logger(__name__) def save_progress(text_encoder, placeholder_token_id, accelerator, args, save_path): logger.info("Saving embeddings") learned_embeds = accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[placeholder_token_id] learned_embeds_dict = {args.placeholder_token: learned_embeds.detach().cpu()} torch.save(learned_embeds_dict, save_path) def parse_args(): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--save_steps", type=int, default=500, help="Save learned_embeds.bin every X updates steps.", ) parser.add_argument( "--only_save_embeds", action="store_true", default=False, help="Save only the embeddings for the new concept.", ) parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--train_data_dir", type=str, default=None, required=True, help="A folder containing the training data." ) parser.add_argument( "--placeholder_token", type=str, default=None, required=True, help="A token to use as a placeholder for the concept.", ) parser.add_argument( "--initializer_token", type=str, default=None, required=True, help="A token to use as initializer word." 
) parser.add_argument("--learnable_property", type=str, default="object", help="Choose between 'object' and 'style'") parser.add_argument("--repeats", type=int, default=100, help="How many times to repeat the training data.") parser.add_argument( "--output_dir", type=str, default="text-inversion-model", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution." ) parser.add_argument( "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." ) parser.add_argument("--num_train_epochs", type=int, default=100) parser.add_argument( "--max_train_steps", type=int, default=5000, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--learning_rate", type=float, default=1e-4, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=True, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--mixed_precision", type=str, default="no", choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." 
), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank if args.train_data_dir is None: raise ValueError("You must specify a train data directory.") return args imagenet_templates_small = [ "a photo of a {}", "a rendering of a {}", "a cropped photo of the {}", "the photo of a {}", "a photo of a clean {}", "a photo of a dirty {}", "a dark photo of the {}", "a photo of my {}", "a photo of the cool {}", "a close-up photo of a {}", "a bright photo of the {}", "a cropped photo of a {}", "a photo of the {}", "a good photo of the {}", "a photo of one {}", "a close-up photo of the {}", "a rendition of the {}", "a photo of the clean {}", "a rendition of a {}", "a photo of a nice {}", "a good photo of a {}", "a photo of the nice {}", "a photo of the small {}", "a photo of the weird {}", "a photo of the large {}", "a photo of a cool {}", "a photo of a small {}", ] imagenet_style_templates_small = [ "a painting in the style of {}", "a rendering in the style of {}", "a cropped painting in the style of {}", "the painting in the style of {}", "a clean painting in the style of {}", "a dirty painting in the style of {}", "a dark painting in the style of {}", "a picture in the style of {}", "a cool painting in the style of {}", "a close-up painting in the style of {}", "a bright painting in the style of {}", "a cropped painting in the style of {}", "a good painting in the style of {}", "a close-up painting in the style of {}", "a rendition in the style of {}", "a nice painting in the style of {}", "a small painting in the style of {}", "a weird painting in the style of {}", "a large painting in the style of {}", ] class TextualInversionDataset(Dataset): def __init__( self, data_root, tokenizer, learnable_property="object", # [object, style] size=512, repeats=100, interpolation="bicubic", flip_p=0.5, set="train", placeholder_token="*", center_crop=False, ): self.data_root = data_root self.tokenizer = tokenizer self.learnable_property = learnable_property self.size = size self.placeholder_token = placeholder_token self.center_crop = center_crop self.flip_p = flip_p self.image_paths = [os.path.join(self.data_root, file_path) for file_path in os.listdir(self.data_root)] self.num_images = len(self.image_paths) self._length = self.num_images if set == "train": self._length = self.num_images * repeats self.interpolation = { "linear": PIL_INTERPOLATION["linear"], "bilinear": PIL_INTERPOLATION["bilinear"], "bicubic": PIL_INTERPOLATION["bicubic"], "lanczos": PIL_INTERPOLATION["lanczos"], }[interpolation] self.templates = imagenet_style_templates_small if learnable_property == "style" else imagenet_templates_small self.flip_transform = transforms.RandomHorizontalFlip(p=self.flip_p) def __len__(self): return self._length def __getitem__(self, i): example = {} image = Image.open(self.image_paths[i % self.num_images]) if not image.mode == "RGB": image = image.convert("RGB") placeholder_string = self.placeholder_token text = random.choice(self.templates).format(placeholder_string) example["input_ids"] = self.tokenizer( text, padding="max_length", truncation=True, max_length=self.tokenizer.model_max_length, return_tensors="pt", ).input_ids[0] # default to score-sde preprocessing img = np.array(image).astype(np.uint8) if self.center_crop: crop = min(img.shape[0], img.shape[1]) ( h, w, ) = ( 
img.shape[0], img.shape[1], ) img = img[(h - crop) // 2 : (h + crop) // 2, (w - crop) // 2 : (w + crop) // 2] image = Image.fromarray(img) image = image.resize((self.size, self.size), resample=self.interpolation) image = self.flip_transform(image) image = np.array(image).astype(np.uint8) image = (image / 127.5 - 1.0).astype(np.float32) example["pixel_values"] = torch.from_numpy(image).permute(2, 0, 1) return example def freeze_params(params): for param in params: param.requires_grad = False def main(): args = parse_args() logging_dir = os.path.join(args.output_dir, args.logging_dir) accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_config=accelerator_project_config, ) # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id # Load the tokenizer and add the placeholder token as a additional special token if args.tokenizer_name: tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name) elif args.pretrained_model_name_or_path: tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer") # Add the placeholder token in tokenizer num_added_tokens = tokenizer.add_tokens(args.placeholder_token) if num_added_tokens == 0: raise ValueError( f"The tokenizer already contains the token {args.placeholder_token}. Please pass a different" " `placeholder_token` that is not already in the tokenizer." 
) # Convert the initializer_token, placeholder_token to ids token_ids = tokenizer.encode(args.initializer_token, add_special_tokens=False) # Check if initializer_token is a single token or a sequence of tokens if len(token_ids) > 1: raise ValueError("The initializer token must be a single token.") initializer_token_id = token_ids[0] placeholder_token_id = tokenizer.convert_tokens_to_ids(args.placeholder_token) # Load models and create wrapper for stable diffusion text_encoder = CLIPTextModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, ) vae = AutoencoderKL.from_pretrained( args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision, ) unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, ) # Resize the token embeddings as we are adding new special tokens to the tokenizer text_encoder.resize_token_embeddings(len(tokenizer)) # Initialise the newly added placeholder token with the embeddings of the initializer token token_embeds = text_encoder.get_input_embeddings().weight.data token_embeds[placeholder_token_id] = token_embeds[initializer_token_id] # Freeze vae and unet freeze_params(vae.parameters()) freeze_params(unet.parameters()) # Freeze all parameters except for the token embeddings in text encoder params_to_freeze = itertools.chain( text_encoder.text_model.encoder.parameters(), text_encoder.text_model.final_layer_norm.parameters(), text_encoder.text_model.embeddings.position_embedding.parameters(), ) freeze_params(params_to_freeze) if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Initialize the optimizer optimizer = torch.optim.AdamW( text_encoder.get_input_embeddings().parameters(), # only optimize the embeddings lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") train_dataset = TextualInversionDataset( data_root=args.train_data_dir, tokenizer=tokenizer, size=args.resolution, placeholder_token=args.placeholder_token, repeats=args.repeats, learnable_property=args.learnable_property, center_crop=args.center_crop, set="train", ) train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=args.train_batch_size, shuffle=True) # Scheduler and math around the number of training steps. 
overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes, num_training_steps=args.max_train_steps * accelerator.num_processes, ) text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( text_encoder, optimizer, train_dataloader, lr_scheduler ) # Move vae and unet to device vae.to(accelerator.device) unet.to(accelerator.device) # Keep vae and unet in eval model as we don't train these vae.eval() unet.eval() unet = ipex.optimize(unet, dtype=torch.bfloat16, inplace=True) vae = ipex.optimize(vae, dtype=torch.bfloat16, inplace=True) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: accelerator.init_trackers("textual_inversion", config=vars(args)) # Train! total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") # Only show the progress bar once on each machine. 
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) progress_bar.set_description("Steps") global_step = 0 text_encoder.train() text_encoder, optimizer = ipex.optimize(text_encoder, optimizer=optimizer, dtype=torch.bfloat16) for epoch in range(args.num_train_epochs): for step, batch in enumerate(train_dataloader): with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16): with accelerator.accumulate(text_encoder): # Convert images to latent space latents = vae.encode(batch["pixel_values"]).latent_dist.sample().detach() latents = latents * vae.config.scaling_factor # Sample noise that we'll add to the latents noise = torch.randn(latents.shape).to(latents.device) bsz = latents.shape[0] # Sample a random timestep for each image timesteps = torch.randint( 0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device ).long() # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) # Get the text embedding for conditioning encoder_hidden_states = text_encoder(batch["input_ids"])[0] # Predict the noise residual model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample # Get the target for loss depending on the prediction type if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(latents, noise, timesteps) else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") loss = F.mse_loss(model_pred, target, reduction="none").mean([1, 2, 3]).mean() accelerator.backward(loss) # Zero out the gradients for all token embeddings except the newly added # embeddings for the concept, as we only want to optimize the concept embeddings if accelerator.num_processes > 1: grads = text_encoder.module.get_input_embeddings().weight.grad else: grads = text_encoder.get_input_embeddings().weight.grad # Get the index for tokens that we want to zero the grads for index_grads_to_zero = torch.arange(len(tokenizer)) != placeholder_token_id grads.data[index_grads_to_zero, :] = grads.data[index_grads_to_zero, :].fill_(0) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) global_step += 1 if global_step % args.save_steps == 0: save_path = os.path.join(args.output_dir, f"learned_embeds-steps-{global_step}.bin") save_progress(text_encoder, placeholder_token_id, accelerator, args, save_path) logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) accelerator.log(logs, step=global_step) if global_step >= args.max_train_steps: break accelerator.wait_for_everyone() # Create the pipeline using using the trained modules and save it. 
if accelerator.is_main_process: if args.push_to_hub and args.only_save_embeds: logger.warn("Enabling full model saving because --push_to_hub=True was specified.") save_full_model = True else: save_full_model = not args.only_save_embeds if save_full_model: pipeline = StableDiffusionPipeline( text_encoder=accelerator.unwrap_model(text_encoder), vae=vae, unet=unet, tokenizer=tokenizer, scheduler=PNDMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler"), safety_checker=StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-safety-checker"), feature_extractor=CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32"), ) pipeline.save_pretrained(args.output_dir) # Save the newly trained embeddings save_path = os.path.join(args.output_dir, "learned_embeds.bin") save_progress(text_encoder, placeholder_token_id, accelerator, args, save_path) if args.push_to_hub: upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) accelerator.end_training() if __name__ == "__main__": main()
0
hf_public_repos/diffusers/examples/research_projects/intel_opts
hf_public_repos/diffusers/examples/research_projects/intel_opts/textual_inversion_dfq/requirements.txt
accelerate
torchvision
transformers>=4.25.0
ftfy
tensorboard
modelcards
neural-compressor
0
hf_public_repos/diffusers/examples/research_projects/intel_opts
hf_public_repos/diffusers/examples/research_projects/intel_opts/textual_inversion_dfq/README.md
# Distillation for quantization on Textual Inversion models to personalize text2image

[Textual inversion](https://arxiv.org/abs/2208.01618) is a method to personalize text2image models like stable diffusion on your own images. _By using just 3-5 images new concepts can be taught to Stable Diffusion and the model personalized on your own images._
The `textual_inversion.py` script shows how to implement the training procedure and adapt it for stable diffusion. We have enabled distillation for quantization in `textual_inversion.py` to do quantization aware training as well as distillation on the model generated by the Textual Inversion method.

## Installing the dependencies

Before running the scripts, make sure to install the library's training dependencies:

```bash
pip install -r requirements.txt
```

## Prepare Datasets

One picture from the huggingface dataset [sd-concepts-library/dicoo2](https://huggingface.co/sd-concepts-library/dicoo2) is needed; save it to the `./dicoo` directory. The picture is shown below:

<a href="https://huggingface.co/sd-concepts-library/dicoo2/blob/main/concept_images/1.jpeg">
    <img src="https://huggingface.co/sd-concepts-library/dicoo2/resolve/main/concept_images/1.jpeg" width="300" height="300">
</a>

## Get a FP32 Textual Inversion model

Use the following command to fine-tune the Stable Diffusion model on the above dataset to obtain the FP32 Textual Inversion model.

```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export DATA_DIR="./dicoo"

accelerate launch textual_inversion.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --train_data_dir=$DATA_DIR \
  --learnable_property="object" \
  --placeholder_token="<dicoo>" --initializer_token="toy" \
  --resolution=512 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=4 \
  --max_train_steps=3000 \
  --learning_rate=5.0e-04 --scale_lr \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --output_dir="dicoo_model"
```

## Do distillation for quantization

Distillation for quantization is a method that combines [intermediate layer knowledge distillation](https://github.com/intel/neural-compressor/blob/master/docs/source/distillation.md#intermediate-layer-knowledge-distillation) and [quantization aware training](https://github.com/intel/neural-compressor/blob/master/docs/source/quantization.md#quantization-aware-training) in the same training process to improve the performance of the quantized model. Provided a FP32 model, the distillation for quantization approach takes this model itself as the teacher model and transfers the knowledge of the specified layers to the student model, i.e. the quantized version of the FP32 model, during the quantization aware training process.

Once you have the FP32 Textual Inversion model, the following command will take it as input to do distillation for quantization and generate the INT8 Textual Inversion model.
```bash
export FP32_MODEL_NAME="./dicoo_model"
export DATA_DIR="./dicoo"

accelerate launch textual_inversion.py \
  --pretrained_model_name_or_path=$FP32_MODEL_NAME \
  --train_data_dir=$DATA_DIR \
  --use_ema --learnable_property="object" \
  --placeholder_token="<dicoo>" --initializer_token="toy" \
  --resolution=512 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=4 \
  --max_train_steps=300 \
  --learning_rate=5.0e-04 --max_grad_norm=3 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --output_dir="int8_model" \
  --do_quantization --do_distillation --verify_loading
```

After the distillation for quantization process, the quantized UNet is about 4 times smaller (3279MB -> 827MB).

## Inference

Once you have trained an INT8 model with the above command, inference can be done simply using the `text2images.py` script. Make sure to include the `placeholder_token` in your prompt.

```bash
export INT8_MODEL_NAME="./int8_model"

python text2images.py \
  --pretrained_model_name_or_path=$INT8_MODEL_NAME \
  --caption "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings." \
  --images_num 4
```

Here is a comparison of images generated by the FP32 model (left) and the INT8 model (right):

<p float="left">
    <img src="https://huggingface.co/datasets/Intel/textual_inversion_dicoo_dfq/resolve/main/FP32.png" width="300" height="300" alt="FP32" align=center />
    <img src="https://huggingface.co/datasets/Intel/textual_inversion_dicoo_dfq/resolve/main/INT8.png" width="300" height="300" alt="INT8" align=center />
</p>
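### Loading the INT8 UNet in your own script

If you prefer not to go through `text2images.py`, the snippet below is a minimal sketch of what that script does: it rebuilds the pipeline saved in `--output_dir` and swaps in the UNet restored from the neural-compressor checkpoint. The `./int8_model` path and the prompt are placeholders for whatever output directory and caption you used.

```python
import torch
from neural_compressor.utils.pytorch import load

from diffusers import StableDiffusionPipeline, UNet2DConditionModel

model_dir = "./int8_model"  # the --output_dir used for distillation for quantization

# The output directory contains a regular (FP32) pipeline plus the
# neural-compressor checkpoint of the quantized UNet.
pipeline = StableDiffusionPipeline.from_pretrained(model_dir)

# Restore the quantized weights into a UNet of the same architecture.
int8_unet = load(model_dir, model=UNet2DConditionModel.from_pretrained(model_dir, subfolder="unet"))
int8_unet.eval()
pipeline.unet = int8_unet

# The quantized UNet is meant to be run on CPU.
pipeline = pipeline.to(torch.device("cpu"))

image = pipeline("a lovely <dicoo> in red dress and hat", num_inference_steps=50).images[0]
image.save("dicoo_int8.png")
```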
0
hf_public_repos/diffusers/examples/research_projects/intel_opts
hf_public_repos/diffusers/examples/research_projects/intel_opts/textual_inversion_dfq/textual_inversion.py
import argparse import itertools import math import os import random from pathlib import Path from typing import Iterable, Optional import numpy as np import PIL import torch import torch.nn.functional as F import torch.utils.checkpoint from accelerate import Accelerator from accelerate.utils import ProjectConfiguration, set_seed from huggingface_hub import HfFolder, Repository, whoami from neural_compressor.utils import logger from packaging import version from PIL import Image from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel from diffusers.optimization import get_scheduler if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"): PIL_INTERPOLATION = { "linear": PIL.Image.Resampling.BILINEAR, "bilinear": PIL.Image.Resampling.BILINEAR, "bicubic": PIL.Image.Resampling.BICUBIC, "lanczos": PIL.Image.Resampling.LANCZOS, "nearest": PIL.Image.Resampling.NEAREST, } else: PIL_INTERPOLATION = { "linear": PIL.Image.LINEAR, "bilinear": PIL.Image.BILINEAR, "bicubic": PIL.Image.BICUBIC, "lanczos": PIL.Image.LANCZOS, "nearest": PIL.Image.NEAREST, } # ------------------------------------------------------------------------------ def save_progress(text_encoder, placeholder_token_id, accelerator, args, save_path): logger.info("Saving embeddings") learned_embeds = accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[placeholder_token_id] learned_embeds_dict = {args.placeholder_token: learned_embeds.detach().cpu()} torch.save(learned_embeds_dict, save_path) def parse_args(): parser = argparse.ArgumentParser(description="Example of distillation for quantization on Textual Inversion.") parser.add_argument( "--save_steps", type=int, default=500, help="Save learned_embeds.bin every X updates steps.", ) parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--train_data_dir", type=str, default=None, required=True, help="A folder containing the training data." ) parser.add_argument( "--placeholder_token", type=str, default=None, required=True, help="A token to use as a placeholder for the concept.", ) parser.add_argument( "--initializer_token", type=str, default=None, required=True, help="A token to use as initializer word." 
) parser.add_argument("--learnable_property", type=str, default="object", help="Choose between 'object' and 'style'") parser.add_argument("--repeats", type=int, default=100, help="How many times to repeat the training data.") parser.add_argument( "--output_dir", type=str, default="text-inversion-model", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument( "--cache_dir", type=str, default=None, help="The directory where the downloaded models and datasets will be stored.", ) parser.add_argument("--seed", type=int, default=42, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution" ) parser.add_argument( "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." ) parser.add_argument("--num_train_epochs", type=int, default=100) parser.add_argument( "--max_train_steps", type=int, default=5000, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--learning_rate", type=float, default=1e-4, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--mixed_precision", type=str, default="no", choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." 
), ) parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--do_quantization", action="store_true", help="Whether or not to do quantization.") parser.add_argument("--do_distillation", action="store_true", help="Whether or not to do distillation.") parser.add_argument( "--verify_loading", action="store_true", help="Whether or not to verify the loading of the quantized model." ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank if args.train_data_dir is None: raise ValueError("You must specify a train data directory.") return args imagenet_templates_small = [ "a photo of a {}", "a rendering of a {}", "a cropped photo of the {}", "the photo of a {}", "a photo of a clean {}", "a photo of a dirty {}", "a dark photo of the {}", "a photo of my {}", "a photo of the cool {}", "a close-up photo of a {}", "a bright photo of the {}", "a cropped photo of a {}", "a photo of the {}", "a good photo of the {}", "a photo of one {}", "a close-up photo of the {}", "a rendition of the {}", "a photo of the clean {}", "a rendition of a {}", "a photo of a nice {}", "a good photo of a {}", "a photo of the nice {}", "a photo of the small {}", "a photo of the weird {}", "a photo of the large {}", "a photo of a cool {}", "a photo of a small {}", ] imagenet_style_templates_small = [ "a painting in the style of {}", "a rendering in the style of {}", "a cropped painting in the style of {}", "the painting in the style of {}", "a clean painting in the style of {}", "a dirty painting in the style of {}", "a dark painting in the style of {}", "a picture in the style of {}", "a cool painting in the style of {}", "a close-up painting in the style of {}", "a bright painting in the style of {}", "a cropped painting in the style of {}", "a good painting in the style of {}", "a close-up painting in the style of {}", "a rendition in the style of {}", "a nice painting in the style of {}", "a small painting in the style of {}", "a weird painting in the style of {}", "a large painting in the style of {}", ] # Adapted from torch-ema https://github.com/fadel/pytorch_ema/blob/master/torch_ema/ema.py#L14 class EMAModel: """ Exponential Moving Average of models weights """ def __init__(self, parameters: Iterable[torch.nn.Parameter], decay=0.9999): parameters = list(parameters) self.shadow_params = [p.clone().detach() for p in parameters] self.decay = decay self.optimization_step = 0 def get_decay(self, optimization_step): """ Compute the decay factor for the exponential moving average. """ value = (1 + optimization_step) / (10 + optimization_step) return 1 - min(self.decay, value) @torch.no_grad() def step(self, parameters): parameters = list(parameters) self.optimization_step += 1 self.decay = self.get_decay(self.optimization_step) for s_param, param in zip(self.shadow_params, parameters): if param.requires_grad: tmp = self.decay * (s_param - param) s_param.sub_(tmp) else: s_param.copy_(param) torch.cuda.empty_cache() def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None: """ Copy current averaged parameters into given collection of parameters. 
Args: parameters: Iterable of `torch.nn.Parameter`; the parameters to be updated with the stored moving averages. If `None`, the parameters with which this `ExponentialMovingAverage` was initialized will be used. """ parameters = list(parameters) for s_param, param in zip(self.shadow_params, parameters): param.data.copy_(s_param.data) def to(self, device=None, dtype=None) -> None: r"""Move internal buffers of the ExponentialMovingAverage to `device`. Args: device: like `device` argument to `torch.Tensor.to` """ # .to() on the tensors handles None correctly self.shadow_params = [ p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device) for p in self.shadow_params ] class TextualInversionDataset(Dataset): def __init__( self, data_root, tokenizer, learnable_property="object", # [object, style] size=512, repeats=100, interpolation="bicubic", flip_p=0.5, set="train", placeholder_token="*", center_crop=False, ): self.data_root = data_root self.tokenizer = tokenizer self.learnable_property = learnable_property self.size = size self.placeholder_token = placeholder_token self.center_crop = center_crop self.flip_p = flip_p self.image_paths = [os.path.join(self.data_root, file_path) for file_path in os.listdir(self.data_root)] self.num_images = len(self.image_paths) self._length = self.num_images if set == "train": self._length = self.num_images * repeats self.interpolation = { "linear": PIL_INTERPOLATION["linear"], "bilinear": PIL_INTERPOLATION["bilinear"], "bicubic": PIL_INTERPOLATION["bicubic"], "lanczos": PIL_INTERPOLATION["lanczos"], }[interpolation] self.templates = imagenet_style_templates_small if learnable_property == "style" else imagenet_templates_small self.flip_transform = transforms.RandomHorizontalFlip(p=self.flip_p) def __len__(self): return self._length def __getitem__(self, i): example = {} image = Image.open(self.image_paths[i % self.num_images]) if not image.mode == "RGB": image = image.convert("RGB") placeholder_string = self.placeholder_token text = random.choice(self.templates).format(placeholder_string) example["input_ids"] = self.tokenizer( text, padding="max_length", truncation=True, max_length=self.tokenizer.model_max_length, return_tensors="pt", ).input_ids[0] # default to score-sde preprocessing img = np.array(image).astype(np.uint8) if self.center_crop: crop = min(img.shape[0], img.shape[1]) ( h, w, ) = ( img.shape[0], img.shape[1], ) img = img[(h - crop) // 2 : (h + crop) // 2, (w - crop) // 2 : (w + crop) // 2] image = Image.fromarray(img) image = image.resize((self.size, self.size), resample=self.interpolation) image = self.flip_transform(image) image = np.array(image).astype(np.uint8) image = (image / 127.5 - 1.0).astype(np.float32) example["pixel_values"] = torch.from_numpy(image).permute(2, 0, 1) return example def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None): if token is None: token = HfFolder.get_token() if organization is None: username = whoami(token)["name"] return f"{username}/{model_id}" else: return f"{organization}/{model_id}" def freeze_params(params): for param in params: param.requires_grad = False def image_grid(imgs, rows, cols): if not len(imgs) == rows * cols: raise ValueError("The specified number of rows and columns are not correct.") w, h = imgs[0].size grid = Image.new("RGB", size=(cols * w, rows * h)) grid_w, grid_h = grid.size for i, img in enumerate(imgs): grid.paste(img, box=(i % cols * w, i // cols * h)) return grid def generate_images(pipeline, prompt="", 
guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42): generator = torch.Generator(pipeline.device).manual_seed(seed) images = pipeline( prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, generator=generator, num_images_per_prompt=num_images_per_prompt, ).images _rows = int(math.sqrt(num_images_per_prompt)) grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows) return grid def main(): args = parse_args() logging_dir = os.path.join(args.output_dir, args.logging_dir) accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with="tensorboard", project_config=accelerator_project_config, ) # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.push_to_hub: if args.hub_model_id is None: repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token) else: repo_name = args.hub_model_id repo = Repository(args.output_dir, clone_from=repo_name) with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: if "step_*" not in gitignore: gitignore.write("step_*\n") if "epoch_*" not in gitignore: gitignore.write("epoch_*\n") elif args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) # Load the tokenizer and add the placeholder token as a additional special token if args.tokenizer_name: tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name) elif args.pretrained_model_name_or_path: tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer") # Load models and create wrapper for stable diffusion noise_scheduler = DDPMScheduler.from_config(args.pretrained_model_name_or_path, subfolder="scheduler") text_encoder = CLIPTextModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, ) vae = AutoencoderKL.from_pretrained( args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision, ) unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, ) train_unet = False # Freeze vae and unet freeze_params(vae.parameters()) if not args.do_quantization and not args.do_distillation: # Add the placeholder token in tokenizer num_added_tokens = tokenizer.add_tokens(args.placeholder_token) if num_added_tokens == 0: raise ValueError( f"The tokenizer already contains the token {args.placeholder_token}. Please pass a different" " `placeholder_token` that is not already in the tokenizer." 
) # Convert the initializer_token, placeholder_token to ids token_ids = tokenizer.encode(args.initializer_token, add_special_tokens=False) # Check if initializer_token is a single token or a sequence of tokens if len(token_ids) > 1: raise ValueError("The initializer token must be a single token.") initializer_token_id = token_ids[0] placeholder_token_id = tokenizer.convert_tokens_to_ids(args.placeholder_token) # Resize the token embeddings as we are adding new special tokens to the tokenizer text_encoder.resize_token_embeddings(len(tokenizer)) # Initialise the newly added placeholder token with the embeddings of the initializer token token_embeds = text_encoder.get_input_embeddings().weight.data token_embeds[placeholder_token_id] = token_embeds[initializer_token_id] freeze_params(unet.parameters()) # Freeze all parameters except for the token embeddings in text encoder params_to_freeze = itertools.chain( text_encoder.text_model.encoder.parameters(), text_encoder.text_model.final_layer_norm.parameters(), text_encoder.text_model.embeddings.position_embedding.parameters(), ) freeze_params(params_to_freeze) else: train_unet = True freeze_params(text_encoder.parameters()) if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Initialize the optimizer optimizer = torch.optim.AdamW( # only optimize the unet or embeddings of text_encoder unet.parameters() if train_unet else text_encoder.get_input_embeddings().parameters(), lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) train_dataset = TextualInversionDataset( data_root=args.train_data_dir, tokenizer=tokenizer, size=args.resolution, placeholder_token=args.placeholder_token, repeats=args.repeats, learnable_property=args.learnable_property, center_crop=args.center_crop, set="train", ) train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=args.train_batch_size, shuffle=True) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes, num_training_steps=args.max_train_steps * accelerator.num_processes, ) if not train_unet: text_encoder = accelerator.prepare(text_encoder) unet.to(accelerator.device) unet.eval() else: unet = accelerator.prepare(unet) text_encoder.to(accelerator.device) text_encoder.eval() optimizer, train_dataloader, lr_scheduler = accelerator.prepare(optimizer, train_dataloader, lr_scheduler) # Move vae to device vae.to(accelerator.device) # Keep vae in eval model as we don't train these vae.eval() compression_manager = None def train_func(model): if train_unet: unet_ = model text_encoder_ = text_encoder else: unet_ = unet text_encoder_ = model # We need to recalculate our total training steps as the size of the training dataloader may have changed. 
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: accelerator.init_trackers("textual_inversion", config=vars(args)) # Train! total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") # Only show the progress bar once on each machine. progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) progress_bar.set_description("Steps") global_step = 0 if train_unet and args.use_ema: ema_unet = EMAModel(unet_.parameters()) for epoch in range(args.num_train_epochs): model.train() train_loss = 0.0 for step, batch in enumerate(train_dataloader): with accelerator.accumulate(model): # Convert images to latent space latents = vae.encode(batch["pixel_values"]).latent_dist.sample().detach() latents = latents * 0.18215 # Sample noise that we'll add to the latents noise = torch.randn(latents.shape).to(latents.device) bsz = latents.shape[0] # Sample a random timestep for each image timesteps = torch.randint( 0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device ).long() # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) # Get the text embedding for conditioning encoder_hidden_states = text_encoder_(batch["input_ids"])[0] # Predict the noise residual model_pred = unet_(noisy_latents, timesteps, encoder_hidden_states).sample loss = F.mse_loss(model_pred, noise, reduction="none").mean([1, 2, 3]).mean() if train_unet and compression_manager: unet_inputs = { "sample": noisy_latents, "timestep": timesteps, "encoder_hidden_states": encoder_hidden_states, } loss = compression_manager.callbacks.on_after_compute_loss(unet_inputs, model_pred, loss) # Gather the losses across all processes for logging (if we use distributed training). 
avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean() train_loss += avg_loss.item() / args.gradient_accumulation_steps # Backpropagate accelerator.backward(loss) if train_unet: if accelerator.sync_gradients: accelerator.clip_grad_norm_(unet_.parameters(), args.max_grad_norm) else: # Zero out the gradients for all token embeddings except the newly added # embeddings for the concept, as we only want to optimize the concept embeddings if accelerator.num_processes > 1: grads = text_encoder_.module.get_input_embeddings().weight.grad else: grads = text_encoder_.get_input_embeddings().weight.grad # Get the index for tokens that we want to zero the grads for index_grads_to_zero = torch.arange(len(tokenizer)) != placeholder_token_id grads.data[index_grads_to_zero, :] = grads.data[index_grads_to_zero, :].fill_(0) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: if train_unet and args.use_ema: ema_unet.step(unet_.parameters()) progress_bar.update(1) global_step += 1 accelerator.log({"train_loss": train_loss}, step=global_step) train_loss = 0.0 if not train_unet and global_step % args.save_steps == 0: save_path = os.path.join(args.output_dir, f"learned_embeds-steps-{global_step}.bin") save_progress(text_encoder_, placeholder_token_id, accelerator, args, save_path) logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) accelerator.log(logs, step=global_step) if global_step >= args.max_train_steps: break accelerator.wait_for_everyone() if train_unet and args.use_ema: ema_unet.copy_to(unet_.parameters()) if not train_unet: return text_encoder_ if not train_unet: text_encoder = train_func(text_encoder) else: import copy model = copy.deepcopy(unet) confs = [] if args.do_quantization: from neural_compressor import QuantizationAwareTrainingConfig q_conf = QuantizationAwareTrainingConfig() confs.append(q_conf) if args.do_distillation: teacher_model = copy.deepcopy(model) def attention_fetcher(x): return x.sample layer_mappings = [ [ [ "conv_in", ] ], [ [ "time_embedding", ] ], [["down_blocks.0.attentions.0", attention_fetcher]], [["down_blocks.0.attentions.1", attention_fetcher]], [ [ "down_blocks.0.resnets.0", ] ], [ [ "down_blocks.0.resnets.1", ] ], [ [ "down_blocks.0.downsamplers.0", ] ], [["down_blocks.1.attentions.0", attention_fetcher]], [["down_blocks.1.attentions.1", attention_fetcher]], [ [ "down_blocks.1.resnets.0", ] ], [ [ "down_blocks.1.resnets.1", ] ], [ [ "down_blocks.1.downsamplers.0", ] ], [["down_blocks.2.attentions.0", attention_fetcher]], [["down_blocks.2.attentions.1", attention_fetcher]], [ [ "down_blocks.2.resnets.0", ] ], [ [ "down_blocks.2.resnets.1", ] ], [ [ "down_blocks.2.downsamplers.0", ] ], [ [ "down_blocks.3.resnets.0", ] ], [ [ "down_blocks.3.resnets.1", ] ], [ [ "up_blocks.0.resnets.0", ] ], [ [ "up_blocks.0.resnets.1", ] ], [ [ "up_blocks.0.resnets.2", ] ], [ [ "up_blocks.0.upsamplers.0", ] ], [["up_blocks.1.attentions.0", attention_fetcher]], [["up_blocks.1.attentions.1", attention_fetcher]], [["up_blocks.1.attentions.2", attention_fetcher]], [ [ "up_blocks.1.resnets.0", ] ], [ [ "up_blocks.1.resnets.1", ] ], [ [ "up_blocks.1.resnets.2", ] ], [ [ "up_blocks.1.upsamplers.0", ] ], [["up_blocks.2.attentions.0", attention_fetcher]], [["up_blocks.2.attentions.1", attention_fetcher]], [["up_blocks.2.attentions.2", attention_fetcher]], [ [ "up_blocks.2.resnets.0", ] ], [ [ 
"up_blocks.2.resnets.1", ] ], [ [ "up_blocks.2.resnets.2", ] ], [ [ "up_blocks.2.upsamplers.0", ] ], [["up_blocks.3.attentions.0", attention_fetcher]], [["up_blocks.3.attentions.1", attention_fetcher]], [["up_blocks.3.attentions.2", attention_fetcher]], [ [ "up_blocks.3.resnets.0", ] ], [ [ "up_blocks.3.resnets.1", ] ], [ [ "up_blocks.3.resnets.2", ] ], [["mid_block.attentions.0", attention_fetcher]], [ [ "mid_block.resnets.0", ] ], [ [ "mid_block.resnets.1", ] ], [ [ "conv_out", ] ], ] layer_names = [layer_mapping[0][0] for layer_mapping in layer_mappings] if not set(layer_names).issubset([n[0] for n in model.named_modules()]): raise ValueError( "Provided model is not compatible with the default layer_mappings, " 'please use the model fine-tuned from "CompVis/stable-diffusion-v1-4", ' "or modify the layer_mappings variable to fit your model." f"\nDefault layer_mappings are as such:\n{layer_mappings}" ) from neural_compressor.config import DistillationConfig, IntermediateLayersKnowledgeDistillationLossConfig distillation_criterion = IntermediateLayersKnowledgeDistillationLossConfig( layer_mappings=layer_mappings, loss_types=["MSE"] * len(layer_mappings), loss_weights=[1.0 / len(layer_mappings)] * len(layer_mappings), add_origin_loss=True, ) d_conf = DistillationConfig(teacher_model=teacher_model, criterion=distillation_criterion) confs.append(d_conf) from neural_compressor.training import prepare_compression compression_manager = prepare_compression(model, confs) compression_manager.callbacks.on_train_begin() model = compression_manager.model train_func(model) compression_manager.callbacks.on_train_end() # Save the resulting model and its corresponding configuration in the given directory model.save(args.output_dir) logger.info(f"Optimized model saved to: {args.output_dir}.") # change to framework model for further use model = model.model # Create the pipeline using using the trained modules and save it. 
templates = imagenet_style_templates_small if args.learnable_property == "style" else imagenet_templates_small prompt = templates[0].format(args.placeholder_token) if accelerator.is_main_process: pipeline = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=accelerator.unwrap_model(text_encoder), vae=vae, unet=accelerator.unwrap_model(unet), tokenizer=tokenizer, ) pipeline.save_pretrained(args.output_dir) pipeline = pipeline.to(unet.device) baseline_model_images = generate_images(pipeline, prompt=prompt, seed=args.seed) baseline_model_images.save( os.path.join(args.output_dir, "{}_baseline_model.png".format("_".join(prompt.split()))) ) if not train_unet: # Also save the newly trained embeddings save_path = os.path.join(args.output_dir, "learned_embeds.bin") save_progress(text_encoder, placeholder_token_id, accelerator, args, save_path) else: setattr(pipeline, "unet", accelerator.unwrap_model(model)) if args.do_quantization: pipeline = pipeline.to(torch.device("cpu")) optimized_model_images = generate_images(pipeline, prompt=prompt, seed=args.seed) optimized_model_images.save( os.path.join(args.output_dir, "{}_optimized_model.png".format("_".join(prompt.split()))) ) if args.push_to_hub: repo.push_to_hub(commit_message="End of training", blocking=False, auto_lfs_prune=True) accelerator.end_training() if args.do_quantization and args.verify_loading: # Load the model obtained after Intel Neural Compressor quantization from neural_compressor.utils.pytorch import load loaded_model = load(args.output_dir, model=unet) loaded_model.eval() setattr(pipeline, "unet", loaded_model) if args.do_quantization: pipeline = pipeline.to(torch.device("cpu")) loaded_model_images = generate_images(pipeline, prompt=prompt, seed=args.seed) if loaded_model_images != optimized_model_images: logger.info("The quantized model was not successfully loaded.") else: logger.info("The quantized model was successfully loaded.") if __name__ == "__main__": main()
0
hf_public_repos/diffusers/examples/research_projects/intel_opts
hf_public_repos/diffusers/examples/research_projects/intel_opts/textual_inversion_dfq/text2images.py
import argparse import math import os import torch from neural_compressor.utils.pytorch import load from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel def parse_args(): parser = argparse.ArgumentParser() parser.add_argument( "-m", "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "-c", "--caption", type=str, default="robotic cat with wings", help="Text used to generate images.", ) parser.add_argument( "-n", "--images_num", type=int, default=4, help="How much images to generate.", ) parser.add_argument( "-s", "--seed", type=int, default=42, help="Seed for random process.", ) parser.add_argument( "-ci", "--cuda_id", type=int, default=0, help="cuda_id.", ) args = parser.parse_args() return args def image_grid(imgs, rows, cols): if not len(imgs) == rows * cols: raise ValueError("The specified number of rows and columns are not correct.") w, h = imgs[0].size grid = Image.new("RGB", size=(cols * w, rows * h)) grid_w, grid_h = grid.size for i, img in enumerate(imgs): grid.paste(img, box=(i % cols * w, i // cols * h)) return grid def generate_images( pipeline, prompt="robotic cat with wings", guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42, ): generator = torch.Generator(pipeline.device).manual_seed(seed) images = pipeline( prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, generator=generator, num_images_per_prompt=num_images_per_prompt, ).images _rows = int(math.sqrt(num_images_per_prompt)) grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows) return grid, images args = parse_args() # Load models and create wrapper for stable diffusion tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer") text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder") vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae") unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet") pipeline = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer ) pipeline.safety_checker = lambda images, clip_input: (images, False) if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")): unet = load(args.pretrained_model_name_or_path, model=unet) unet.eval() setattr(pipeline, "unet", unet) else: unet = unet.to(torch.device("cuda", args.cuda_id)) pipeline = pipeline.to(unet.device) grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed) grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split())))) dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split())) os.makedirs(dirname, exist_ok=True) for idx, image in enumerate(images): image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
0
hf_public_repos/diffusers/examples/research_projects
hf_public_repos/diffusers/examples/research_projects/mulit_token_textual_inversion/requirements.txt
accelerate>=0.16.0
torchvision
transformers>=4.25.1
ftfy
tensorboard
Jinja2
0
hf_public_repos/diffusers/examples/research_projects
hf_public_repos/diffusers/examples/research_projects/mulit_token_textual_inversion/multi_token_clip.py
""" The main idea for this code is to provide a way for users to not need to bother with the hassle of multiple tokens for a concept by typing a photo of <concept>_0 <concept>_1 ... and so on and instead just do a photo of <concept> which gets translated to the above. This needs to work for both inference and training. For inference, the tokenizer encodes the text. So, we would want logic for our tokenizer to replace the placeholder token with it's underlying vectors For training, we would want to abstract away some logic like 1. Adding tokens 2. Updating gradient mask 3. Saving embeddings to our Util class here. so TODO: 1. have tokenizer keep track of concept, multiconcept pairs and replace during encode call x 2. have mechanism for adding tokens x 3. have mech for saving emebeddings x 4. get mask to update x 5. Loading tokens from embedding x 6. Integrate to training x 7. Test """ import copy import random from transformers import CLIPTokenizer class MultiTokenCLIPTokenizer(CLIPTokenizer): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.token_map = {} def try_adding_tokens(self, placeholder_token, *args, **kwargs): num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs) if num_added_tokens == 0: raise ValueError( f"The tokenizer already contains the token {placeholder_token}. Please pass a different" " `placeholder_token` that is not already in the tokenizer." ) def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs): output = [] if num_vec_per_token == 1: self.try_adding_tokens(placeholder_token, *args, **kwargs) output.append(placeholder_token) else: output = [] for i in range(num_vec_per_token): ith_token = placeholder_token + f"_{i}" self.try_adding_tokens(ith_token, *args, **kwargs) output.append(ith_token) # handle cases where there is a new placeholder token that contains the current placeholder token but is larger for token in self.token_map: if token in placeholder_token: raise ValueError( f"The tokenizer already has placeholder token {token} that can get confused with" f" {placeholder_token}keep placeholder tokens independent" ) self.token_map[placeholder_token] = output def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0): """ Here, we replace the placeholder tokens in text recorded in token_map so that the text_encoder can encode them vector_shuffle was inspired by https://github.com/rinongal/textual_inversion/pull/119 where shuffling tokens were found to force the model to learn the concepts more descriptively. 
""" if isinstance(text, list): output = [] for i in range(len(text)): output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle)) return output for placeholder_token in self.token_map: if placeholder_token in text: tokens = self.token_map[placeholder_token] tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)] if vector_shuffle: tokens = copy.copy(tokens) random.shuffle(tokens) text = text.replace(placeholder_token, " ".join(tokens)) return text def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs): return super().__call__( self.replace_placeholder_tokens_in_text( text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load ), *args, **kwargs, ) def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs): return super().encode( self.replace_placeholder_tokens_in_text( text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load ), *args, **kwargs, )
0
hf_public_repos/diffusers/examples/research_projects
hf_public_repos/diffusers/examples/research_projects/mulit_token_textual_inversion/README.md
## [Deprecated] Multi Token Textual Inversion

**IMPORTANT: This research project is deprecated. Multi Token Textual Inversion is now supported natively in [the official textual inversion example](https://github.com/huggingface/diffusers/tree/main/examples/textual_inversion#running-locally-with-pytorch).**

The author of this project is [Isamu Isozaki](https://github.com/isamu-isozaki) - please make sure to tag the author for issues and PRs as well as @patrickvonplaten.

We add multi token support to textual inversion. The following options were added:

1. `num_vec_per_token`: the number of vectors used to represent the placeholder token
2. `progressive_tokens`: progressively train the token from 1 token to 2 tokens, and so on
3. `progressive_tokens_max_steps`: the maximum number of steps until we start full training
4. `vector_shuffle`: shuffle vectors during training

Feel free to add these options to your training! In practice, `num_vec_per_token` around 10 combined with `vector_shuffle` works great!

## Textual Inversion fine-tuning example

[Textual inversion](https://arxiv.org/abs/2208.01618) is a method to personalize text2image models like Stable Diffusion on your own images using just 3-5 examples.
The `textual_inversion.py` script shows how to implement the training procedure and adapt it for Stable Diffusion.

## Running on Colab

Colab for training
[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb)

Colab for inference
[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb)

## Running locally with PyTorch

### Installing the dependencies

Before running the scripts, make sure to install the library's training dependencies:

**Important**

To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:

```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install .
```

Then cd into the example folder and run:

```bash
pip install -r requirements.txt
```

And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:

```bash
accelerate config
```

### Cat toy example

You need to accept the model license before downloading or using the weights. In this example we'll use model version `v1-5`, so you'll need to visit [its card](https://huggingface.co/runwayml/stable-diffusion-v1-5), read the license and tick the checkbox if you agree.

You have to be a registered user in 🤗 Hugging Face Hub, and you'll also need to use an access token for the code to work. For more information on access tokens, please refer to [this section of the documentation](https://huggingface.co/docs/hub/security-tokens).

Run the following command to authenticate your token:

```bash
huggingface-cli login
```

If you have already cloned the repo, then you won't need to go through these steps.

<br>

Now let's get our dataset. Download 3-4 images from [here](https://drive.google.com/drive/folders/1fmJMs25nxS_rSNqS5hTcRdLem_YQXbq5) and save them in a directory. This will be our training data.
And launch the training using:

**___Note: Change the `resolution` to 768 if you are using the [stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) 768x768 model.___**

```bash
export MODEL_NAME="runwayml/stable-diffusion-v1-5"
export DATA_DIR="path-to-dir-containing-images"

accelerate launch textual_inversion.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --train_data_dir=$DATA_DIR \
  --learnable_property="object" \
  --placeholder_token="<cat-toy>" --initializer_token="toy" \
  --resolution=512 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=4 \
  --max_train_steps=3000 \
  --learning_rate=5.0e-04 --scale_lr \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --output_dir="textual_inversion_cat"
```

A full training run takes ~1 hour on one V100 GPU.

### Inference

Once you have trained a model using the above command, inference can be done simply using the `StableDiffusionPipeline`. Make sure to include the `placeholder_token` in your prompt.

```python
import torch

from diffusers import StableDiffusionPipeline

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A <cat-toy> backpack"

image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("cat-backpack.png")
```

## Training with Flax/JAX

For faster training on TPUs and GPUs you can leverage the flax training example. Follow the instructions above to get the model and dataset before running the script.

Before running the scripts, make sure to install the library's training dependencies:

```bash
pip install -U -r requirements_flax.txt
```

```bash
export MODEL_NAME="duongna/stable-diffusion-v1-4-flax"
export DATA_DIR="path-to-dir-containing-images"

python textual_inversion_flax.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --train_data_dir=$DATA_DIR \
  --learnable_property="object" \
  --placeholder_token="<cat-toy>" --initializer_token="toy" \
  --resolution=512 \
  --train_batch_size=1 \
  --max_train_steps=3000 \
  --learning_rate=5.0e-04 --scale_lr \
  --output_dir="textual_inversion_cat"
```

It should be at least 70% faster than the PyTorch script with the same configuration.

### Training with xformers

You can enable memory efficient attention by [installing xFormers](https://github.com/facebookresearch/xformers#installing-xformers) and passing the `--enable_xformers_memory_efficient_attention` argument to the script. This is not available with the Flax/JAX implementation.
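### Loading the learned multi-token embeddings

`save_progress` in this project's `textual_inversion.py` writes `learned_embeds.bin`, a dictionary mapping each placeholder token to its trained embedding vectors. The sketch below shows one way to load such a file back into a fresh pipeline using the helpers defined in this folder; the model id, the embeddings path, and the `from textual_inversion import ...` line are assumptions about your local setup, not an official API.

```python
import torch

from diffusers import StableDiffusionPipeline
from multi_token_clip import MultiTokenCLIPTokenizer
from textual_inversion import load_multitoken_tokenizer  # helper defined in this folder

model_id = "runwayml/stable-diffusion-v1-5"
tokenizer = MultiTokenCLIPTokenizer.from_pretrained(model_id, subfolder="tokenizer")
pipe = StableDiffusionPipeline.from_pretrained(model_id, tokenizer=tokenizer)

# Dictionary saved by save_progress: {placeholder_token: trained embedding vectors}.
learned_embeds_dict = torch.load("textual_inversion_cat/learned_embeds.bin")

# Registers "<cat-toy>_0 ... <cat-toy>_{n-1}" in the tokenizer and copies the trained
# vectors into the text encoder's embedding matrix.
load_multitoken_tokenizer(tokenizer, pipe.text_encoder, learned_embeds_dict)

pipe = pipe.to("cuda")
image = pipe("A <cat-toy> backpack", num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("cat-backpack.png")
```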
0
hf_public_repos/diffusers/examples/research_projects
hf_public_repos/diffusers/examples/research_projects/mulit_token_textual_inversion/textual_inversion.py
#!/usr/bin/env python # coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and import argparse import logging import math import os import random from pathlib import Path import numpy as np import PIL import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import ProjectConfiguration, set_seed from huggingface_hub import create_repo, upload_folder from multi_token_clip import MultiTokenCLIPTokenizer # TODO: remove and import from diffusers.utils when the new version of diffusers is released from packaging import version from PIL import Image from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import CLIPTextModel import diffusers from diffusers import ( AutoencoderKL, DDPMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, StableDiffusionPipeline, UNet2DConditionModel, ) from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version, is_wandb_available from diffusers.utils.import_utils import is_xformers_available if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"): PIL_INTERPOLATION = { "linear": PIL.Image.Resampling.BILINEAR, "bilinear": PIL.Image.Resampling.BILINEAR, "bicubic": PIL.Image.Resampling.BICUBIC, "lanczos": PIL.Image.Resampling.LANCZOS, "nearest": PIL.Image.Resampling.NEAREST, } else: PIL_INTERPOLATION = { "linear": PIL.Image.LINEAR, "bilinear": PIL.Image.BILINEAR, "bicubic": PIL.Image.BICUBIC, "lanczos": PIL.Image.LANCZOS, "nearest": PIL.Image.NEAREST, } # ------------------------------------------------------------------------------ # Will error if the minimal version of diffusers is not installed. Remove at your own risks. 
check_min_version("0.14.0.dev0") logger = get_logger(__name__) def add_tokens(tokenizer, text_encoder, placeholder_token, num_vec_per_token=1, initializer_token=None): """ Add tokens to the tokenizer and set the initial value of token embeddings """ tokenizer.add_placeholder_tokens(placeholder_token, num_vec_per_token=num_vec_per_token) text_encoder.resize_token_embeddings(len(tokenizer)) token_embeds = text_encoder.get_input_embeddings().weight.data placeholder_token_ids = tokenizer.encode(placeholder_token, add_special_tokens=False) if initializer_token: token_ids = tokenizer.encode(initializer_token, add_special_tokens=False) for i, placeholder_token_id in enumerate(placeholder_token_ids): token_embeds[placeholder_token_id] = token_embeds[token_ids[i * len(token_ids) // num_vec_per_token]] else: for i, placeholder_token_id in enumerate(placeholder_token_ids): token_embeds[placeholder_token_id] = torch.randn_like(token_embeds[placeholder_token_id]) return placeholder_token def save_progress(tokenizer, text_encoder, accelerator, save_path): for placeholder_token in tokenizer.token_map: placeholder_token_ids = tokenizer.encode(placeholder_token, add_special_tokens=False) learned_embeds = accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[placeholder_token_ids] if len(placeholder_token_ids) == 1: learned_embeds = learned_embeds[None] learned_embeds_dict = {placeholder_token: learned_embeds.detach().cpu()} torch.save(learned_embeds_dict, save_path) def load_multitoken_tokenizer(tokenizer, text_encoder, learned_embeds_dict): for placeholder_token in learned_embeds_dict: placeholder_embeds = learned_embeds_dict[placeholder_token] num_vec_per_token = placeholder_embeds.shape[0] placeholder_embeds = placeholder_embeds.to(dtype=text_encoder.dtype) add_tokens(tokenizer, text_encoder, placeholder_token, num_vec_per_token=num_vec_per_token) placeholder_token_ids = tokenizer.encode(placeholder_token, add_special_tokens=False) token_embeds = text_encoder.get_input_embeddings().weight.data for i, placeholder_token_id in enumerate(placeholder_token_ids): token_embeds[placeholder_token_id] = placeholder_embeds[i] def load_multitoken_tokenizer_from_automatic(tokenizer, text_encoder, automatic_dict, placeholder_token): """ Automatic1111's tokens have format {'string_to_token': {'*': 265}, 'string_to_param': {'*': tensor([[ 0.0833, 0.0030, 0.0057, ..., -0.0264, -0.0616, -0.0529], [ 0.0058, -0.0190, -0.0584, ..., -0.0025, -0.0945, -0.0490], [ 0.0916, 0.0025, 0.0365, ..., -0.0685, -0.0124, 0.0728], [ 0.0812, -0.0199, -0.0100, ..., -0.0581, -0.0780, 0.0254]], requires_grad=True)}, 'name': 'FloralMarble-400', 'step': 399, 'sd_checkpoint': '4bdfc29c', 'sd_checkpoint_name': 'SD2.1-768'} """ learned_embeds_dict = {} learned_embeds_dict[placeholder_token] = automatic_dict["string_to_param"]["*"] load_multitoken_tokenizer(tokenizer, text_encoder, learned_embeds_dict) def get_mask(tokenizer, accelerator): # Get the mask of the weights that won't change mask = torch.ones(len(tokenizer)).to(accelerator.device, dtype=torch.bool) for placeholder_token in tokenizer.token_map: placeholder_token_ids = tokenizer.encode(placeholder_token, add_special_tokens=False) for i in range(len(placeholder_token_ids)): mask = mask & (torch.arange(len(tokenizer)) != placeholder_token_ids[i]).to(accelerator.device) return mask def parse_args(): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--progressive_tokens_max_steps", type=int, default=2000, help="The number 
of steps until all tokens will be used.", ) parser.add_argument( "--progressive_tokens", action="store_true", help="Progressively train the tokens. For example, first train for 1 token, then 2 tokens and so on.", ) parser.add_argument("--vector_shuffle", action="store_true", help="Shuffling tokens durint training") parser.add_argument( "--num_vec_per_token", type=int, default=1, help=( "The number of vectors used to represent the placeholder token. The higher the number, the better the" " result at the cost of editability. This can be fixed by prompt editing." ), ) parser.add_argument( "--save_steps", type=int, default=500, help="Save learned_embeds.bin every X updates steps.", ) parser.add_argument( "--only_save_embeds", action="store_true", default=False, help="Save only the embeddings for the new concept.", ) parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--train_data_dir", type=str, default=None, required=True, help="A folder containing the training data." ) parser.add_argument( "--placeholder_token", type=str, default=None, required=True, help="A token to use as a placeholder for the concept.", ) parser.add_argument( "--initializer_token", type=str, default=None, required=True, help="A token to use as initializer word." ) parser.add_argument("--learnable_property", type=str, default="object", help="Choose between 'object' and 'style'") parser.add_argument("--repeats", type=int, default=100, help="How many times to repeat the training data.") parser.add_argument( "--output_dir", type=str, default="text-inversion-model", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution." ) parser.add_argument( "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." ) parser.add_argument("--num_train_epochs", type=int, default=100) parser.add_argument( "--max_train_steps", type=int, default=5000, help="Total number of training steps to perform. 
If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--learning_rate", type=float, default=1e-4, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--dataloader_num_workers", type=int, default=0, help=( "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." ), ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--mixed_precision", type=str, default="no", choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." ), ) parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) parser.add_argument( "--validation_prompt", type=str, default=None, help="A prompt that is used during validation to verify that the model is learning.", ) parser.add_argument( "--num_validation_images", type=int, default=4, help="Number of images that should be generated during validation with `validation_prompt`.", ) parser.add_argument( "--validation_epochs", type=int, default=50, help=( "Run validation every X epochs. 
Validation consists of running the prompt" " `args.validation_prompt` multiple times: `args.num_validation_images`" " and logging the images." ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming" " training using `--resume_from_checkpoint`." ), ) parser.add_argument( "--checkpoints_total_limit", type=int, default=None, help=( "Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`." " See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state" " for more docs" ), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) parser.add_argument( "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." ) args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank if args.train_data_dir is None: raise ValueError("You must specify a train data directory.") return args imagenet_templates_small = [ "a photo of a {}", "a rendering of a {}", "a cropped photo of the {}", "the photo of a {}", "a photo of a clean {}", "a photo of a dirty {}", "a dark photo of the {}", "a photo of my {}", "a photo of the cool {}", "a close-up photo of a {}", "a bright photo of the {}", "a cropped photo of a {}", "a photo of the {}", "a good photo of the {}", "a photo of one {}", "a close-up photo of the {}", "a rendition of the {}", "a photo of the clean {}", "a rendition of a {}", "a photo of a nice {}", "a good photo of a {}", "a photo of the nice {}", "a photo of the small {}", "a photo of the weird {}", "a photo of the large {}", "a photo of a cool {}", "a photo of a small {}", ] imagenet_style_templates_small = [ "a painting in the style of {}", "a rendering in the style of {}", "a cropped painting in the style of {}", "the painting in the style of {}", "a clean painting in the style of {}", "a dirty painting in the style of {}", "a dark painting in the style of {}", "a picture in the style of {}", "a cool painting in the style of {}", "a close-up painting in the style of {}", "a bright painting in the style of {}", "a cropped painting in the style of {}", "a good painting in the style of {}", "a close-up painting in the style of {}", "a rendition in the style of {}", "a nice painting in the style of {}", "a small painting in the style of {}", "a weird painting in the style of {}", "a large painting in the style of {}", ] class TextualInversionDataset(Dataset): def __init__( self, data_root, tokenizer, learnable_property="object", # [object, style] size=512, repeats=100, interpolation="bicubic", flip_p=0.5, set="train", placeholder_token="*", center_crop=False, vector_shuffle=False, progressive_tokens=False, ): self.data_root = data_root self.tokenizer = tokenizer self.learnable_property = learnable_property self.size = size self.placeholder_token = placeholder_token self.center_crop = center_crop self.flip_p = flip_p self.vector_shuffle = vector_shuffle self.progressive_tokens = 
progressive_tokens self.prop_tokens_to_load = 0 self.image_paths = [os.path.join(self.data_root, file_path) for file_path in os.listdir(self.data_root)] self.num_images = len(self.image_paths) self._length = self.num_images if set == "train": self._length = self.num_images * repeats self.interpolation = { "linear": PIL_INTERPOLATION["linear"], "bilinear": PIL_INTERPOLATION["bilinear"], "bicubic": PIL_INTERPOLATION["bicubic"], "lanczos": PIL_INTERPOLATION["lanczos"], }[interpolation] self.templates = imagenet_style_templates_small if learnable_property == "style" else imagenet_templates_small self.flip_transform = transforms.RandomHorizontalFlip(p=self.flip_p) def __len__(self): return self._length def __getitem__(self, i): example = {} image = Image.open(self.image_paths[i % self.num_images]) if not image.mode == "RGB": image = image.convert("RGB") placeholder_string = self.placeholder_token text = random.choice(self.templates).format(placeholder_string) example["input_ids"] = self.tokenizer.encode( text, padding="max_length", truncation=True, max_length=self.tokenizer.model_max_length, return_tensors="pt", vector_shuffle=self.vector_shuffle, prop_tokens_to_load=self.prop_tokens_to_load if self.progressive_tokens else 1.0, )[0] # default to score-sde preprocessing img = np.array(image).astype(np.uint8) if self.center_crop: crop = min(img.shape[0], img.shape[1]) ( h, w, ) = ( img.shape[0], img.shape[1], ) img = img[(h - crop) // 2 : (h + crop) // 2, (w - crop) // 2 : (w + crop) // 2] image = Image.fromarray(img) image = image.resize((self.size, self.size), resample=self.interpolation) image = self.flip_transform(image) image = np.array(image).astype(np.uint8) image = (image / 127.5 - 1.0).astype(np.float32) example["pixel_values"] = torch.from_numpy(image).permute(2, 0, 1) return example def main(): args = parse_args() logging_dir = os.path.join(args.output_dir, args.logging_dir) accelerator_project_config = ProjectConfiguration( total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir ) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_config=accelerator_project_config, ) if args.report_to == "wandb": if not is_wandb_available(): raise ImportError("Make sure to install wandb if you want to use it for logging during training.") import wandb # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. 
if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id # Load tokenizer if args.tokenizer_name: tokenizer = MultiTokenCLIPTokenizer.from_pretrained(args.tokenizer_name) elif args.pretrained_model_name_or_path: tokenizer = MultiTokenCLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer") # Load scheduler and models noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") text_encoder = CLIPTextModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision ) vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision) unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision ) if is_xformers_available(): try: unet.enable_xformers_memory_efficient_attention() except Exception as e: logger.warning( "Could not enable memory efficient attention. Make sure xformers is installed" f" correctly and a GPU is available: {e}" ) add_tokens(tokenizer, text_encoder, args.placeholder_token, args.num_vec_per_token, args.initializer_token) # Freeze vae and unet vae.requires_grad_(False) unet.requires_grad_(False) # Freeze all parameters except for the token embeddings in text encoder text_encoder.text_model.encoder.requires_grad_(False) text_encoder.text_model.final_layer_norm.requires_grad_(False) text_encoder.text_model.embeddings.position_embedding.requires_grad_(False) if args.gradient_checkpointing: # Keep unet in train mode if we are using gradient checkpointing to save memory. # The dropout cannot be != 0 so it doesn't matter if we are in eval or train mode. unet.train() text_encoder.gradient_checkpointing_enable() unet.enable_gradient_checkpointing() if args.enable_xformers_memory_efficient_attention: if is_xformers_available(): import xformers xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): logger.warn( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. 
Make sure it is installed correctly") # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Initialize the optimizer optimizer = torch.optim.AdamW( text_encoder.get_input_embeddings().parameters(), # only optimize the embeddings lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) # Dataset and DataLoaders creation: train_dataset = TextualInversionDataset( data_root=args.train_data_dir, tokenizer=tokenizer, size=args.resolution, placeholder_token=args.placeholder_token, repeats=args.repeats, learnable_property=args.learnable_property, center_crop=args.center_crop, set="train", ) train_dataloader = torch.utils.data.DataLoader( train_dataset, batch_size=args.train_batch_size, shuffle=True, num_workers=args.dataloader_num_workers ) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes, num_training_steps=args.max_train_steps * accelerator.num_processes, ) # Prepare everything with our `accelerator`. text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( text_encoder, optimizer, train_dataloader, lr_scheduler ) # For mixed precision training we cast the unet and vae weights to half-precision # as these models are only used for inference, keeping weights in full precision is not required. weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Move vae and unet to device and cast to weight_dtype unet.to(accelerator.device, dtype=weight_dtype) vae.to(accelerator.device, dtype=weight_dtype) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: accelerator.init_trackers("textual_inversion", config=vars(args)) # Train! total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. 
parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] if len(dirs) > 0 else None if path is None: accelerator.print( f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." ) args.resume_from_checkpoint = None else: accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(os.path.join(args.output_dir, path)) global_step = int(path.split("-")[1]) resume_global_step = global_step * args.gradient_accumulation_steps first_epoch = global_step // num_update_steps_per_epoch resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps) # Only show the progress bar once on each machine. progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process) progress_bar.set_description("Steps") # keep original embeddings as reference orig_embeds_params = accelerator.unwrap_model(text_encoder).get_input_embeddings().weight.data.clone() for epoch in range(first_epoch, args.num_train_epochs): text_encoder.train() for step, batch in enumerate(train_dataloader): # Skip steps until we reach the resumed step if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step: if step % args.gradient_accumulation_steps == 0: progress_bar.update(1) continue if args.progressive_tokens: train_dataset.prop_tokens_to_load = float(global_step) / args.progressive_tokens_max_steps with accelerator.accumulate(text_encoder): # Convert images to latent space latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample().detach() latents = latents * vae.config.scaling_factor # Sample noise that we'll add to the latents noise = torch.randn_like(latents) bsz = latents.shape[0] # Sample a random timestep for each image timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device) timesteps = timesteps.long() # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) # Get the text embedding for conditioning encoder_hidden_states = text_encoder(batch["input_ids"])[0].to(dtype=weight_dtype) # Predict the noise residual model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample # Get the target for loss depending on the prediction type if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(latents, noise, timesteps) else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Let's make sure we don't update any embedding weights besides the newly added token index_no_updates = get_mask(tokenizer, 
accelerator) with torch.no_grad(): accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[ index_no_updates ] = orig_embeds_params[index_no_updates] # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) global_step += 1 if global_step % args.save_steps == 0: save_path = os.path.join(args.output_dir, f"learned_embeds-steps-{global_step}.bin") save_progress(tokenizer, text_encoder, accelerator, save_path) if global_step % args.checkpointing_steps == 0: if accelerator.is_main_process: save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) accelerator.log(logs, step=global_step) if global_step >= args.max_train_steps: break if accelerator.is_main_process and args.validation_prompt is not None and epoch % args.validation_epochs == 0: logger.info( f"Running validation... \n Generating {args.num_validation_images} images with prompt:" f" {args.validation_prompt}." ) # create pipeline (note: unet and vae are loaded again in float32) pipeline = DiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=accelerator.unwrap_model(text_encoder), tokenizer=tokenizer, unet=unet, vae=vae, revision=args.revision, torch_dtype=weight_dtype, ) pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config) pipeline = pipeline.to(accelerator.device) pipeline.set_progress_bar_config(disable=True) # run inference generator = ( None if args.seed is None else torch.Generator(device=accelerator.device).manual_seed(args.seed) ) images = [] for _ in range(args.num_validation_images): with torch.autocast("cuda"): image = pipeline(args.validation_prompt, num_inference_steps=25, generator=generator).images[0] images.append(image) for tracker in accelerator.trackers: if tracker.name == "tensorboard": np_images = np.stack([np.asarray(img) for img in images]) tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC") if tracker.name == "wandb": tracker.log( { "validation": [ wandb.Image(image, caption=f"{i}: {args.validation_prompt}") for i, image in enumerate(images) ] } ) del pipeline torch.cuda.empty_cache() # Create the pipeline using using the trained modules and save it. accelerator.wait_for_everyone() if accelerator.is_main_process: if args.push_to_hub and args.only_save_embeds: logger.warn("Enabling full model saving because --push_to_hub=True was specified.") save_full_model = True else: save_full_model = not args.only_save_embeds if save_full_model: pipeline = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=accelerator.unwrap_model(text_encoder), vae=vae, unet=unet, tokenizer=tokenizer, ) pipeline.save_pretrained(args.output_dir) # Save the newly trained embeddings save_path = os.path.join(args.output_dir, "learned_embeds.bin") save_progress(tokenizer, text_encoder, accelerator, save_path) if args.push_to_hub: upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) accelerator.end_training() if __name__ == "__main__": main()
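Editor's note, an illustrative sketch rather than part of the script above: with `--progressive_tokens`, the training loop sets `prop_tokens_to_load = global_step / progressive_tokens_max_steps`, and the dataset forwards that fraction to the tokenizer so that only part of the `--num_vec_per_token` embedding vectors is used early in training. The helper below only illustrates that schedule; the exact rounding applied inside `MultiTokenCLIPTokenizer` may differ, and `active_vectors` is a hypothetical name.

```python
# Illustrative sketch of the --progressive_tokens schedule (assumed rounding behavior).
def active_vectors(global_step: int, progressive_tokens_max_steps: int, num_vec_per_token: int) -> int:
    # Same ratio the training loop computes, clamped to 1.0 once the ramp is over.
    prop = min(float(global_step) / progressive_tokens_max_steps, 1.0)
    # Assume at least one vector is always active.
    return max(1, round(prop * num_vec_per_token))

if __name__ == "__main__":
    # With 4 vectors per token and a 2000-step ramp, roughly one extra vector
    # becomes active every 500 steps.
    for step in (0, 500, 1000, 1500, 2000, 3000):
        print(step, active_vectors(step, progressive_tokens_max_steps=2000, num_vec_per_token=4))
```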
0
hf_public_repos/diffusers/examples/research_projects
hf_public_repos/diffusers/examples/research_projects/mulit_token_textual_inversion/textual_inversion_flax.py
import argparse import logging import math import os import random from pathlib import Path import jax import jax.numpy as jnp import numpy as np import optax import PIL import torch import torch.utils.checkpoint import transformers from flax import jax_utils from flax.training import train_state from flax.training.common_utils import shard from huggingface_hub import create_repo, upload_folder # TODO: remove and import from diffusers.utils when the new version of diffusers is released from packaging import version from PIL import Image from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import CLIPImageProcessor, CLIPTokenizer, FlaxCLIPTextModel, set_seed from diffusers import ( FlaxAutoencoderKL, FlaxDDPMScheduler, FlaxPNDMScheduler, FlaxStableDiffusionPipeline, FlaxUNet2DConditionModel, ) from diffusers.pipelines.stable_diffusion import FlaxStableDiffusionSafetyChecker from diffusers.utils import check_min_version if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"): PIL_INTERPOLATION = { "linear": PIL.Image.Resampling.BILINEAR, "bilinear": PIL.Image.Resampling.BILINEAR, "bicubic": PIL.Image.Resampling.BICUBIC, "lanczos": PIL.Image.Resampling.LANCZOS, "nearest": PIL.Image.Resampling.NEAREST, } else: PIL_INTERPOLATION = { "linear": PIL.Image.LINEAR, "bilinear": PIL.Image.BILINEAR, "bicubic": PIL.Image.BICUBIC, "lanczos": PIL.Image.LANCZOS, "nearest": PIL.Image.NEAREST, } # ------------------------------------------------------------------------------ # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.14.0.dev0") logger = logging.getLogger(__name__) def parse_args(): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--train_data_dir", type=str, default=None, required=True, help="A folder containing the training data." ) parser.add_argument( "--placeholder_token", type=str, default=None, required=True, help="A token to use as a placeholder for the concept.", ) parser.add_argument( "--initializer_token", type=str, default=None, required=True, help="A token to use as initializer word." ) parser.add_argument("--learnable_property", type=str, default="object", help="Choose between 'object' and 'style'") parser.add_argument("--repeats", type=int, default=100, help="How many times to repeat the training data.") parser.add_argument( "--output_dir", type=str, default="text-inversion-model", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument("--seed", type=int, default=42, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution." ) parser.add_argument( "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." 
) parser.add_argument("--num_train_epochs", type=int, default=100) parser.add_argument( "--max_train_steps", type=int, default=5000, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--learning_rate", type=float, default=1e-4, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=True, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument( "--use_auth_token", action="store_true", help=( "Will use the token generated when running `huggingface-cli login` (necessary to use this script with" " private models)." ), ) parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." 
), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank if args.train_data_dir is None: raise ValueError("You must specify a train data directory.") return args imagenet_templates_small = [ "a photo of a {}", "a rendering of a {}", "a cropped photo of the {}", "the photo of a {}", "a photo of a clean {}", "a photo of a dirty {}", "a dark photo of the {}", "a photo of my {}", "a photo of the cool {}", "a close-up photo of a {}", "a bright photo of the {}", "a cropped photo of a {}", "a photo of the {}", "a good photo of the {}", "a photo of one {}", "a close-up photo of the {}", "a rendition of the {}", "a photo of the clean {}", "a rendition of a {}", "a photo of a nice {}", "a good photo of a {}", "a photo of the nice {}", "a photo of the small {}", "a photo of the weird {}", "a photo of the large {}", "a photo of a cool {}", "a photo of a small {}", ] imagenet_style_templates_small = [ "a painting in the style of {}", "a rendering in the style of {}", "a cropped painting in the style of {}", "the painting in the style of {}", "a clean painting in the style of {}", "a dirty painting in the style of {}", "a dark painting in the style of {}", "a picture in the style of {}", "a cool painting in the style of {}", "a close-up painting in the style of {}", "a bright painting in the style of {}", "a cropped painting in the style of {}", "a good painting in the style of {}", "a close-up painting in the style of {}", "a rendition in the style of {}", "a nice painting in the style of {}", "a small painting in the style of {}", "a weird painting in the style of {}", "a large painting in the style of {}", ] class TextualInversionDataset(Dataset): def __init__( self, data_root, tokenizer, learnable_property="object", # [object, style] size=512, repeats=100, interpolation="bicubic", flip_p=0.5, set="train", placeholder_token="*", center_crop=False, ): self.data_root = data_root self.tokenizer = tokenizer self.learnable_property = learnable_property self.size = size self.placeholder_token = placeholder_token self.center_crop = center_crop self.flip_p = flip_p self.image_paths = [os.path.join(self.data_root, file_path) for file_path in os.listdir(self.data_root)] self.num_images = len(self.image_paths) self._length = self.num_images if set == "train": self._length = self.num_images * repeats self.interpolation = { "linear": PIL_INTERPOLATION["linear"], "bilinear": PIL_INTERPOLATION["bilinear"], "bicubic": PIL_INTERPOLATION["bicubic"], "lanczos": PIL_INTERPOLATION["lanczos"], }[interpolation] self.templates = imagenet_style_templates_small if learnable_property == "style" else imagenet_templates_small self.flip_transform = transforms.RandomHorizontalFlip(p=self.flip_p) def __len__(self): return self._length def __getitem__(self, i): example = {} image = Image.open(self.image_paths[i % self.num_images]) if not image.mode == "RGB": image = image.convert("RGB") placeholder_string = self.placeholder_token text = random.choice(self.templates).format(placeholder_string) example["input_ids"] = self.tokenizer( text, padding="max_length", truncation=True, max_length=self.tokenizer.model_max_length, return_tensors="pt", ).input_ids[0] # default to score-sde preprocessing img = np.array(image).astype(np.uint8) if self.center_crop: crop = min(img.shape[0], img.shape[1]) ( h, w, ) = ( 
img.shape[0], img.shape[1], ) img = img[(h - crop) // 2 : (h + crop) // 2, (w - crop) // 2 : (w + crop) // 2] image = Image.fromarray(img) image = image.resize((self.size, self.size), resample=self.interpolation) image = self.flip_transform(image) image = np.array(image).astype(np.uint8) image = (image / 127.5 - 1.0).astype(np.float32) example["pixel_values"] = torch.from_numpy(image).permute(2, 0, 1) return example def resize_token_embeddings(model, new_num_tokens, initializer_token_id, placeholder_token_id, rng): if model.config.vocab_size == new_num_tokens or new_num_tokens is None: return model.config.vocab_size = new_num_tokens params = model.params old_embeddings = params["text_model"]["embeddings"]["token_embedding"]["embedding"] old_num_tokens, emb_dim = old_embeddings.shape initializer = jax.nn.initializers.normal() new_embeddings = initializer(rng, (new_num_tokens, emb_dim)) new_embeddings = new_embeddings.at[:old_num_tokens].set(old_embeddings) new_embeddings = new_embeddings.at[placeholder_token_id].set(new_embeddings[initializer_token_id]) params["text_model"]["embeddings"]["token_embedding"]["embedding"] = new_embeddings model.params = params return model def get_params_to_save(params): return jax.device_get(jax.tree_util.tree_map(lambda x: x[0], params)) def main(): args = parse_args() if args.seed is not None: set_seed(args.seed) if jax.process_index() == 0: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR) if jax.process_index() == 0: transformers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() # Load the tokenizer and add the placeholder token as a additional special token if args.tokenizer_name: tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name) elif args.pretrained_model_name_or_path: tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer") # Add the placeholder token in tokenizer num_added_tokens = tokenizer.add_tokens(args.placeholder_token) if num_added_tokens == 0: raise ValueError( f"The tokenizer already contains the token {args.placeholder_token}. Please pass a different" " `placeholder_token` that is not already in the tokenizer." 
) # Convert the initializer_token, placeholder_token to ids token_ids = tokenizer.encode(args.initializer_token, add_special_tokens=False) # Check if initializer_token is a single token or a sequence of tokens if len(token_ids) > 1: raise ValueError("The initializer token must be a single token.") initializer_token_id = token_ids[0] placeholder_token_id = tokenizer.convert_tokens_to_ids(args.placeholder_token) # Load models and create wrapper for stable diffusion text_encoder = FlaxCLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder") vae, vae_params = FlaxAutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae") unet, unet_params = FlaxUNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet") # Create sampling rng rng = jax.random.PRNGKey(args.seed) rng, _ = jax.random.split(rng) # Resize the token embeddings as we are adding new special tokens to the tokenizer text_encoder = resize_token_embeddings( text_encoder, len(tokenizer), initializer_token_id, placeholder_token_id, rng ) original_token_embeds = text_encoder.params["text_model"]["embeddings"]["token_embedding"]["embedding"] train_dataset = TextualInversionDataset( data_root=args.train_data_dir, tokenizer=tokenizer, size=args.resolution, placeholder_token=args.placeholder_token, repeats=args.repeats, learnable_property=args.learnable_property, center_crop=args.center_crop, set="train", ) def collate_fn(examples): pixel_values = torch.stack([example["pixel_values"] for example in examples]) input_ids = torch.stack([example["input_ids"] for example in examples]) batch = {"pixel_values": pixel_values, "input_ids": input_ids} batch = {k: v.numpy() for k, v in batch.items()} return batch total_train_batch_size = args.train_batch_size * jax.local_device_count() train_dataloader = torch.utils.data.DataLoader( train_dataset, batch_size=total_train_batch_size, shuffle=True, drop_last=True, collate_fn=collate_fn ) # Optimization if args.scale_lr: args.learning_rate = args.learning_rate * total_train_batch_size constant_scheduler = optax.constant_schedule(args.learning_rate) optimizer = optax.adamw( learning_rate=constant_scheduler, b1=args.adam_beta1, b2=args.adam_beta2, eps=args.adam_epsilon, weight_decay=args.adam_weight_decay, ) def create_mask(params, label_fn): def _map(params, mask, label_fn): for k in params: if label_fn(k): mask[k] = "token_embedding" else: if isinstance(params[k], dict): mask[k] = {} _map(params[k], mask[k], label_fn) else: mask[k] = "zero" mask = {} _map(params, mask, label_fn) return mask def zero_grads(): # from https://github.com/deepmind/optax/issues/159#issuecomment-896459491 def init_fn(_): return () def update_fn(updates, state, params=None): return jax.tree_util.tree_map(jnp.zeros_like, updates), () return optax.GradientTransformation(init_fn, update_fn) # Zero out gradients of layers other than the token embedding layer tx = optax.multi_transform( {"token_embedding": optimizer, "zero": zero_grads()}, create_mask(text_encoder.params, lambda s: s == "token_embedding"), ) state = train_state.TrainState.create(apply_fn=text_encoder.__call__, params=text_encoder.params, tx=tx) noise_scheduler = FlaxDDPMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000 ) noise_scheduler_state = noise_scheduler.create_state() # Initialize our training train_rngs = jax.random.split(rng, jax.local_device_count()) # Define gradient train step fn def train_step(state, vae_params, 
unet_params, batch, train_rng): dropout_rng, sample_rng, new_train_rng = jax.random.split(train_rng, 3) def compute_loss(params): vae_outputs = vae.apply( {"params": vae_params}, batch["pixel_values"], deterministic=True, method=vae.encode ) latents = vae_outputs.latent_dist.sample(sample_rng) # (NHWC) -> (NCHW) latents = jnp.transpose(latents, (0, 3, 1, 2)) latents = latents * vae.config.scaling_factor noise_rng, timestep_rng = jax.random.split(sample_rng) noise = jax.random.normal(noise_rng, latents.shape) bsz = latents.shape[0] timesteps = jax.random.randint( timestep_rng, (bsz,), 0, noise_scheduler.config.num_train_timesteps, ) noisy_latents = noise_scheduler.add_noise(noise_scheduler_state, latents, noise, timesteps) encoder_hidden_states = state.apply_fn( batch["input_ids"], params=params, dropout_rng=dropout_rng, train=True )[0] # Predict the noise residual and compute loss model_pred = unet.apply( {"params": unet_params}, noisy_latents, timesteps, encoder_hidden_states, train=False ).sample # Get the target for loss depending on the prediction type if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(noise_scheduler_state, latents, noise, timesteps) else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") loss = (target - model_pred) ** 2 loss = loss.mean() return loss grad_fn = jax.value_and_grad(compute_loss) loss, grad = grad_fn(state.params) grad = jax.lax.pmean(grad, "batch") new_state = state.apply_gradients(grads=grad) # Keep the token embeddings fixed except the newly added embeddings for the concept, # as we only want to optimize the concept embeddings token_embeds = original_token_embeds.at[placeholder_token_id].set( new_state.params["text_model"]["embeddings"]["token_embedding"]["embedding"][placeholder_token_id] ) new_state.params["text_model"]["embeddings"]["token_embedding"]["embedding"] = token_embeds metrics = {"loss": loss} metrics = jax.lax.pmean(metrics, axis_name="batch") return new_state, metrics, new_train_rng # Create parallel version of the train and eval step p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,)) # Replicate the train state on each device state = jax_utils.replicate(state) vae_params = jax_utils.replicate(vae_params) unet_params = jax_utils.replicate(unet_params) # Train! num_update_steps_per_epoch = math.ceil(len(train_dataloader)) # Scheduler and math around the number of training steps. if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel & distributed) = {total_train_batch_size}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 epochs = tqdm(range(args.num_train_epochs), desc=f"Epoch ... 
(1/{args.num_train_epochs})", position=0) for epoch in epochs: # ======================== Training ================================ train_metrics = [] steps_per_epoch = len(train_dataset) // total_train_batch_size train_step_progress_bar = tqdm(total=steps_per_epoch, desc="Training...", position=1, leave=False) # train for batch in train_dataloader: batch = shard(batch) state, train_metric, train_rngs = p_train_step(state, vae_params, unet_params, batch, train_rngs) train_metrics.append(train_metric) train_step_progress_bar.update(1) global_step += 1 if global_step >= args.max_train_steps: break train_metric = jax_utils.unreplicate(train_metric) train_step_progress_bar.close() epochs.write(f"Epoch... ({epoch + 1}/{args.num_train_epochs} | Loss: {train_metric['loss']})") # Create the pipeline using using the trained modules and save it. if jax.process_index() == 0: scheduler = FlaxPNDMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True ) safety_checker = FlaxStableDiffusionSafetyChecker.from_pretrained( "CompVis/stable-diffusion-safety-checker", from_pt=True ) pipeline = FlaxStableDiffusionPipeline( text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32"), ) pipeline.save_pretrained( args.output_dir, params={ "text_encoder": get_params_to_save(state.params), "vae": get_params_to_save(vae_params), "unet": get_params_to_save(unet_params), "safety_checker": safety_checker.params, }, ) # Also save the newly trained embeddings learned_embeds = get_params_to_save(state.params)["text_model"]["embeddings"]["token_embedding"]["embedding"][ placeholder_token_id ] learned_embeds_dict = {args.placeholder_token: learned_embeds} jnp.save(os.path.join(args.output_dir, "learned_embeds.npy"), learned_embeds_dict) if args.push_to_hub: upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) if __name__ == "__main__": main()
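Editor's note, a minimal sketch rather than part of the script above: the Flax script stores the learned embedding with `jnp.save` as a one-entry dictionary keyed by the placeholder token, so the saved `.npy` holds a pickled object and needs `allow_pickle=True` to read back. The output directory below is whatever was passed as `--output_dir`.

```python
# Minimal sketch: inspect the embedding saved by the Flax training script above.
import os

import numpy as np

output_dir = "text-inversion-model"  # assumed --output_dir used during training
learned = np.load(os.path.join(output_dir, "learned_embeds.npy"), allow_pickle=True).item()

for token, embedding in learned.items():
    # e.g. "<my-token>" with a (768,)-shaped vector for a CLIP ViT-L/14 text encoder
    print(token, np.asarray(embedding).shape)
```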
0
hf_public_repos/diffusers/examples/research_projects
hf_public_repos/diffusers/examples/research_projects/mulit_token_textual_inversion/requirements_flax.txt
transformers>=4.25.1 flax optax torch torchvision ftfy tensorboard Jinja2
0
hf_public_repos/diffusers/examples/research_projects
hf_public_repos/diffusers/examples/research_projects/multi_subject_dreambooth/requirements.txt
accelerate>=0.16.0 torchvision transformers>=4.25.1 ftfy tensorboard Jinja2
0
hf_public_repos/diffusers/examples/research_projects
hf_public_repos/diffusers/examples/research_projects/multi_subject_dreambooth/README.md
# Multi Subject DreamBooth training [DreamBooth](https://arxiv.org/abs/2208.12242) is a method to personalize text2image models like stable diffusion given just a few(3~5) images of a subject. This `train_multi_subject_dreambooth.py` script shows how to implement the training procedure for one or more subjects and adapt it for stable diffusion. Note that this code is based off of the `examples/dreambooth/train_dreambooth.py` script as of 01/06/2022. This script was added by @kopsahlong, and is not actively maintained. However, if you come across anything that could use fixing, feel free to open an issue and tag @kopsahlong. ## Running locally with PyTorch ### Installing the dependencies Before running the script, make sure to install the library's training dependencies: To start, execute the following steps in a new virtual environment: ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install -e . ``` Then cd into the folder `diffusers/examples/research_projects/multi_subject_dreambooth` and run the following: ```bash pip install -r requirements.txt ``` And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with: ```bash accelerate config ``` Or for a default accelerate configuration without answering questions about your environment ```bash accelerate config default ``` Or if your environment doesn't support an interactive shell e.g. a notebook ```python from accelerate.utils import write_basic_config write_basic_config() ``` ### Multi Subject Training Example In order to have your model learn multiple concepts at once, we simply add in the additional data directories and prompts to our `instance_data_dir` and `instance_prompt` (as well as `class_data_dir` and `class_prompt` if `--with_prior_preservation` is specified) as one comma separated string. See an example with 2 subjects below, which learns a model for one dog subject and one human subject: ```bash export MODEL_NAME="CompVis/stable-diffusion-v1-4" export OUTPUT_DIR="path-to-save-model" # Subject 1 export INSTANCE_DIR_1="path-to-instance-images-concept-1" export INSTANCE_PROMPT_1="a photo of a sks dog" export CLASS_DIR_1="path-to-class-images-dog" export CLASS_PROMPT_1="a photo of a dog" # Subject 2 export INSTANCE_DIR_2="path-to-instance-images-concept-2" export INSTANCE_PROMPT_2="a photo of a t@y person" export CLASS_DIR_2="path-to-class-images-person" export CLASS_PROMPT_2="a photo of a person" accelerate launch train_multi_subject_dreambooth.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --instance_data_dir="$INSTANCE_DIR_1,$INSTANCE_DIR_2" \ --output_dir=$OUTPUT_DIR \ --train_text_encoder \ --instance_prompt="$INSTANCE_PROMPT_1,$INSTANCE_PROMPT_2" \ --with_prior_preservation \ --prior_loss_weight=1.0 \ --class_data_dir="$CLASS_DIR_1,$CLASS_DIR_2" \ --class_prompt="$CLASS_PROMPT_1,$CLASS_PROMPT_2"\ --num_class_images=50 \ --resolution=512 \ --train_batch_size=1 \ --gradient_accumulation_steps=1 \ --learning_rate=1e-6 \ --lr_scheduler="constant" \ --lr_warmup_steps=0 \ --max_train_steps=1500 ``` This example shows training for 2 subjects, but please note that the model can be trained on any number of new concepts. This can be done by continuing to add in the corresponding directories and prompts to the corresponding comma separated string. Note also that in this script, `sks` and `t@y` were used as tokens to learn the new subjects ([this thread](https://github.com/XavierXiao/Dreambooth-Stable-Diffusion/issues/71) inspired the use of `t@y` as our second identifier). 
However, there may be better rare tokens to experiment with, and results also seemed to be good when more intuitive words were used.

**Important**: New parameters have been added to the script, making it possible to validate training progress by generating images at specified steps. Since a comma-separated list inside a prompt field is fragile (commas commonly appear in regular prompt text), we introduce the `concepts_list` parameter: a JSON file in which you can define a separate configuration for each subject you want to train. An example of how to generate the file:

```python
import json

# here we are using parameters for prior-preservation and validation as well.
concepts_list = [
    {
        "instance_prompt": "drawing of a t@y meme",
        "class_prompt": "drawing of a meme",
        "instance_data_dir": "/some_folder/meme_toy",
        "class_data_dir": "/data/meme",
        "validation_prompt": "drawing of a t@y meme about football in Uruguay",
        "validation_negative_prompt": "black and white"
    },
    {
        "instance_prompt": "drawing of a sks sir",
        "class_prompt": "drawing of a sir",
        "instance_data_dir": "/some_other_folder/sir_sks",
        "class_data_dir": "/data/sir",
        "validation_prompt": "drawing of a sks sir with the Uruguayan sun in his chest",
        "validation_negative_prompt": "an old man",
        "validation_guidance_scale": 20,
        "validation_number_images": 3,
        "validation_inference_steps": 10
    }
]

with open("concepts_list.json", "w") as f:
    json.dump(concepts_list, f, indent=4)
```

And then just point to the file when executing the script:

```bash
# exports...
accelerate launch train_multi_subject_dreambooth.py \
# more parameters...
--concepts_list="concepts_list.json"
```

You can use the script's help output to get a better sense of each parameter.

### Inference

Once you have trained a model with the above command, inference can be done simply using the `StableDiffusionPipeline`. Make sure to include the identifier (e.g. `sks` in the above example) in your prompt.

```python
from diffusers import StableDiffusionPipeline
import torch

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of a t@y person petting an sks dog"
image = pipe(prompt, num_inference_steps=200, guidance_scale=7.5).images[0]

image.save("person-petting-dog.png")
```

### Inference from a training checkpoint

You can also perform inference from one of the checkpoints saved during training, if you used the `--checkpointing_steps` argument. Please refer to [the documentation](https://huggingface.co/docs/diffusers/main/en/training/dreambooth#performing-inference-using-a-saved-checkpoint) to see how to do it.

## Additional Dreambooth documentation

Because the `train_multi_subject_dreambooth.py` script here was forked from an original version of `train_dreambooth.py` in the `examples/dreambooth` folder, I've included the original applicable training documentation for single-subject examples below.

This should explain how to play with training variables such as prior preservation, fine-tuning the text encoder, etc., all of which still apply to our multi-subject training code. Note also that the examples below, which are single-subject examples, also work with `train_multi_subject_dreambooth.py`, as this script supports 1 (or more) subjects.

### Single subject dog toy example

Let's get our dataset.
Download images from [here](https://drive.google.com/drive/folders/1BO_dyz-p65qhBRRMRA4TbZ8qW4rB99JZ) and save them in a directory. This will be our training data.

Then launch the training using:

**___Note: Change the `resolution` to 768 if you are using the [stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) 768x768 model.___**

```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export INSTANCE_DIR="path-to-instance-images"
export OUTPUT_DIR="path-to-save-model"

accelerate launch train_dreambooth.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --output_dir=$OUTPUT_DIR \
  --instance_prompt="a photo of sks dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=1 \
  --learning_rate=5e-6 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --max_train_steps=400
```

### Training with prior-preservation loss

Prior preservation is used to avoid overfitting and language drift. Refer to the paper to learn more about it. For prior preservation, we first generate images using the model with a class prompt and then use those during training along with our data.

According to the paper, it's recommended to generate `num_epochs * num_samples` images for prior preservation. 200-300 works well for most cases. The `num_class_images` flag sets the number of images to generate with the class prompt. You can place existing images in `class_data_dir`, and the training script will generate any additional images so that `num_class_images` are present in `class_data_dir` at training time.

```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export INSTANCE_DIR="path-to-instance-images"
export CLASS_DIR="path-to-class-images"
export OUTPUT_DIR="path-to-save-model"

accelerate launch train_dreambooth.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --class_data_dir=$CLASS_DIR \
  --output_dir=$OUTPUT_DIR \
  --with_prior_preservation --prior_loss_weight=1.0 \
  --instance_prompt="a photo of sks dog" \
  --class_prompt="a photo of dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=1 \
  --learning_rate=5e-6 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --num_class_images=200 \
  --max_train_steps=800
```

### Training on a 16GB GPU

With the help of gradient checkpointing and the 8-bit optimizer from bitsandbytes, it's possible to train DreamBooth on a 16GB GPU. To install `bitsandbytes`, please refer to this [readme](https://github.com/TimDettmers/bitsandbytes#requirements--installation).

```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export INSTANCE_DIR="path-to-instance-images"
export CLASS_DIR="path-to-class-images"
export OUTPUT_DIR="path-to-save-model"

accelerate launch train_dreambooth.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --class_data_dir=$CLASS_DIR \
  --output_dir=$OUTPUT_DIR \
  --with_prior_preservation --prior_loss_weight=1.0 \
  --instance_prompt="a photo of sks dog" \
  --class_prompt="a photo of dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=2 --gradient_checkpointing \
  --use_8bit_adam \
  --learning_rate=5e-6 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --num_class_images=200 \
  --max_train_steps=800
```

### Training on an 8 GB GPU

By using [DeepSpeed](https://www.deepspeed.ai/) it's possible to offload some tensors from VRAM to either CPU or NVMe, allowing training with less VRAM.

DeepSpeed needs to be enabled with `accelerate config`.
During configuration, answer yes to "Do you want to use DeepSpeed?". With DeepSpeed stage 2, fp16 mixed precision, and offloading of both parameters and optimizer state to CPU, it's possible to train on under 8 GB of VRAM, at the cost of requiring significantly more system RAM (about 25 GB). See the [documentation](https://huggingface.co/docs/accelerate/usage_guides/deepspeed) for more DeepSpeed configuration options.

Changing the default Adam optimizer to DeepSpeed's special version of Adam, `deepspeed.ops.adam.DeepSpeedCPUAdam`, gives a substantial speedup, but enabling it requires a CUDA toolchain matching the PyTorch version. The 8-bit optimizer does not seem to be compatible with DeepSpeed at the moment.

```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export INSTANCE_DIR="path-to-instance-images"
export CLASS_DIR="path-to-class-images"
export OUTPUT_DIR="path-to-save-model"

accelerate launch --mixed_precision="fp16" train_dreambooth.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --class_data_dir=$CLASS_DIR \
  --output_dir=$OUTPUT_DIR \
  --with_prior_preservation --prior_loss_weight=1.0 \
  --instance_prompt="a photo of sks dog" \
  --class_prompt="a photo of dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --sample_batch_size=1 \
  --gradient_accumulation_steps=1 --gradient_checkpointing \
  --learning_rate=5e-6 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --num_class_images=200 \
  --max_train_steps=800
```

### Fine-tune text encoder with the UNet

The script also allows fine-tuning the `text_encoder` along with the `unet`. It has been observed experimentally that fine-tuning the `text_encoder` gives much better results, especially on faces. Pass the `--train_text_encoder` argument to the script to enable training the `text_encoder`.

___Note: Training the text encoder requires more memory; with this option the training won't fit on a 16GB GPU. It needs at least 24GB VRAM.___

```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export INSTANCE_DIR="path-to-instance-images"
export CLASS_DIR="path-to-class-images"
export OUTPUT_DIR="path-to-save-model"

accelerate launch train_dreambooth.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --train_text_encoder \
  --instance_data_dir=$INSTANCE_DIR \
  --class_data_dir=$CLASS_DIR \
  --output_dir=$OUTPUT_DIR \
  --with_prior_preservation --prior_loss_weight=1.0 \
  --instance_prompt="a photo of sks dog" \
  --class_prompt="a photo of dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --use_8bit_adam \
  --gradient_checkpointing \
  --learning_rate=2e-6 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --num_class_images=200 \
  --max_train_steps=800
```

### Using DreamBooth for pipelines other than Stable Diffusion

[AltDiffusion](https://huggingface.co/docs/diffusers/api/pipelines/alt_diffusion) also supports DreamBooth now. The running command is basically the same as above; all you need to do is change `MODEL_NAME` (i.e. `pretrained_model_name_or_path`) to point at that architecture, like this:

```
export MODEL_NAME="CompVis/stable-diffusion-v1-4" --> export MODEL_NAME="BAAI/AltDiffusion-m9"
or
export MODEL_NAME="CompVis/stable-diffusion-v1-4" --> export MODEL_NAME="BAAI/AltDiffusion"
```

### Training with xformers

You can enable memory-efficient attention by [installing xFormers](https://github.com/facebookresearch/xformers#installing-xformers) and passing the `--enable_xformers_memory_efficient_attention` argument to the script. This is not available with the Flax/JAX implementation.
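The same memory-efficient attention can also be enabled on a loaded pipeline at inference time. A short sketch, where the model path is a placeholder for your trained output directory:

```python
# Sketch: enable xformers memory-efficient attention for inference as well.
# Requires xformers to be installed; the model path below is a placeholder.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("path-to-save-model", torch_dtype=torch.float16).to("cuda")
pipe.enable_xformers_memory_efficient_attention()

image = pipe("a photo of sks dog in a bucket", num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("sks-dog.png")
```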
You can also use Dreambooth to train the specialized in-painting model. See [the script in the research folder for details](https://github.com/huggingface/diffusers/tree/main/examples/research_projects/dreambooth_inpaint).
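If you do train the in-painting variant linked above, inference follows the standard diffusers in-painting API. A hedged sketch, where the checkpoint path and image files are placeholders:

```python
# Sketch only: run a DreamBooth-trained in-painting checkpoint (paths are placeholders).
import torch
from diffusers import StableDiffusionInpaintPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "path-to-your-inpainting-model", torch_dtype=torch.float16
).to("cuda")

init_image = load_image("photo.png")  # image to edit
mask_image = load_image("mask.png")   # white = region to repaint

prompt = "a photo of sks dog sitting on a bench"
image = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0]
image.save("inpainted.png")
```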
0
hf_public_repos/diffusers/examples/research_projects
hf_public_repos/diffusers/examples/research_projects/multi_subject_dreambooth/train_multi_subject_dreambooth.py
import argparse import hashlib import itertools import json import logging import math import uuid import warnings from os import environ, listdir, makedirs from os.path import basename, join from pathlib import Path from typing import List import datasets import numpy as np import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import ProjectConfiguration, set_seed from huggingface_hub import create_repo, upload_folder from PIL import Image from torch import dtype from torch.nn import Module from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import AutoTokenizer, PretrainedConfig import diffusers from diffusers import ( AutoencoderKL, DDPMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, UNet2DConditionModel, ) from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version, is_wandb_available from diffusers.utils.import_utils import is_xformers_available if is_wandb_available(): import wandb # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.13.0.dev0") logger = get_logger(__name__) def log_validation_images_to_tracker( images: List[np.array], label: str, validation_prompt: str, accelerator: Accelerator, epoch: int ): logger.info(f"Logging images to tracker for validation prompt: {validation_prompt}.") for tracker in accelerator.trackers: if tracker.name == "tensorboard": np_images = np.stack([np.asarray(img) for img in images]) tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC") if tracker.name == "wandb": tracker.log( { "validation": [ wandb.Image(image, caption=f"{label}_{epoch}_{i}: {validation_prompt}") for i, image in enumerate(images) ] } ) # TODO: Add `prompt_embeds` and `negative_prompt_embeds` parameters to the function when `pre_compute_text_embeddings` # argument is implemented. def generate_validation_images( text_encoder: Module, tokenizer: Module, unet: Module, vae: Module, arguments: argparse.Namespace, accelerator: Accelerator, weight_dtype: dtype, ): logger.info("Running validation images.") pipeline_args = {} if text_encoder is not None: pipeline_args["text_encoder"] = accelerator.unwrap_model(text_encoder) if vae is not None: pipeline_args["vae"] = vae # create pipeline (note: unet and vae are loaded again in float32) pipeline = DiffusionPipeline.from_pretrained( arguments.pretrained_model_name_or_path, tokenizer=tokenizer, unet=accelerator.unwrap_model(unet), revision=arguments.revision, torch_dtype=weight_dtype, **pipeline_args, ) # We train on the simplified learning objective. 
If we were previously predicting a variance, we need the # scheduler to ignore it scheduler_args = {} if "variance_type" in pipeline.scheduler.config: variance_type = pipeline.scheduler.config.variance_type if variance_type in ["learned", "learned_range"]: variance_type = "fixed_small" scheduler_args["variance_type"] = variance_type pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config, **scheduler_args) pipeline = pipeline.to(accelerator.device) pipeline.set_progress_bar_config(disable=True) generator = ( None if arguments.seed is None else torch.Generator(device=accelerator.device).manual_seed(arguments.seed) ) images_sets = [] for vp, nvi, vnp, vis, vgs in zip( arguments.validation_prompt, arguments.validation_number_images, arguments.validation_negative_prompt, arguments.validation_inference_steps, arguments.validation_guidance_scale, ): images = [] if vp is not None: logger.info( f"Generating {nvi} images with prompt: '{vp}', negative prompt: '{vnp}', inference steps: {vis}, " f"guidance scale: {vgs}." ) pipeline_args = {"prompt": vp, "negative_prompt": vnp, "num_inference_steps": vis, "guidance_scale": vgs} # run inference # TODO: it would be good to measure whether it's faster to run inference on all images at once, one at a # time or in small batches for _ in range(nvi): with torch.autocast("cuda"): image = pipeline(**pipeline_args, num_images_per_prompt=1, generator=generator).images[0] images.append(image) images_sets.append(images) del pipeline if torch.cuda.is_available(): torch.cuda.empty_cache() return images_sets def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str, revision: str): text_encoder_config = PretrainedConfig.from_pretrained( pretrained_model_name_or_path, subfolder="text_encoder", revision=revision, ) model_class = text_encoder_config.architectures[0] if model_class == "CLIPTextModel": from transformers import CLIPTextModel return CLIPTextModel elif model_class == "RobertaSeriesModelWithTransformation": from diffusers.pipelines.alt_diffusion.modeling_roberta_series import RobertaSeriesModelWithTransformation return RobertaSeriesModelWithTransformation else: raise ValueError(f"{model_class} is not supported.") def parse_args(input_args=None): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--instance_data_dir", type=str, default=None, required=False, help="A folder containing the training data of instance images.", ) parser.add_argument( "--class_data_dir", type=str, default=None, required=False, help="A folder containing the training data of class images.", ) parser.add_argument( "--instance_prompt", type=str, default=None, required=False, help="The prompt with identifier specifying the instance", ) parser.add_argument( "--class_prompt", type=str, default=None, help="The prompt to specify images in the same class as provided instance images.", ) parser.add_argument( "--with_prior_preservation", default=False, action="store_true", help="Flag to add prior preservation loss.", ) 
parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.") parser.add_argument( "--num_class_images", type=int, default=100, help=( "Minimal class images for prior preservation loss. If there are not enough images already present in" " class_data_dir, additional images will be sampled with class_prompt." ), ) parser.add_argument( "--output_dir", type=str, default="text-inversion-model", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--center_crop", default=False, action="store_true", help=( "Whether to center crop the input images to the resolution. If not set, the images will be randomly" " cropped. The images will be resized to the resolution first before cropping." ), ) parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder") parser.add_argument( "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader." ) parser.add_argument( "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images." ) parser.add_argument("--num_train_epochs", type=int, default=1) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final" " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming" " training using `--resume_from_checkpoint`." ), ) parser.add_argument( "--checkpoints_total_limit", type=int, default=None, help=( "Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`." " See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state" " for more docs" ), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--learning_rate", type=float, default=5e-6, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. 
Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--lr_num_cycles", type=int, default=1, help="Number of hard resets of the lr in cosine_with_restarts scheduler.", ) parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.") parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) parser.add_argument( "--validation_steps", type=int, default=None, help=( "Run validation every X steps. Validation consists of running the prompt(s) `validation_prompt` " "multiple times (`validation_number_images`) and logging the images." ), ) parser.add_argument( "--validation_prompt", type=str, default=None, help="A prompt that is used during validation to verify that the model is learning. You can use commas to " "define multiple negative prompts. This parameter can be defined also within the file given by " "`concepts_list` parameter in the respective subject.", ) parser.add_argument( "--validation_number_images", type=int, default=4, help="Number of images that should be generated during validation with the validation parameters given. This " "can be defined within the file given by `concepts_list` parameter in the respective subject.", ) parser.add_argument( "--validation_negative_prompt", type=str, default=None, help="A negative prompt that is used during validation to verify that the model is learning. You can use commas" " to define multiple negative prompts, each one corresponding to a validation prompt. 
This parameter can " "be defined also within the file given by `concepts_list` parameter in the respective subject.", ) parser.add_argument( "--validation_inference_steps", type=int, default=25, help="Number of inference steps (denoising steps) to run during validation. This can be defined within the " "file given by `concepts_list` parameter in the respective subject.", ) parser.add_argument( "--validation_guidance_scale", type=float, default=7.5, help="To control how much the image generation process follows the text prompt. This can be defined within the " "file given by `concepts_list` parameter in the respective subject.", ) parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." ), ) parser.add_argument( "--prior_generation_precision", type=str, default=None, choices=["no", "fp32", "fp16", "bf16"], help=( "Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to fp16 if a GPU is available else fp32." ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument( "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." ) parser.add_argument( "--set_grads_to_none", action="store_true", help=( "Save more memory by using setting grads to None instead of zero. Be aware, that this changes certain" " behaviors, so disable this argument if it causes any problems. More info:" " https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html" ), ) parser.add_argument( "--concepts_list", type=str, default=None, help="Path to json file containing a list of multiple concepts, will overwrite parameters like instance_prompt," " class_prompt, etc.", ) if input_args: args = parser.parse_args(input_args) else: args = parser.parse_args() if not args.concepts_list and (not args.instance_data_dir or not args.instance_prompt): raise ValueError( "You must specify either instance parameters (data directory, prompt, etc.) or use " "the `concept_list` parameter and specify them within the file." ) if args.concepts_list: if args.instance_prompt: raise ValueError("If you are using `concepts_list` parameter, define the instance prompt within the file.") if args.instance_data_dir: raise ValueError( "If you are using `concepts_list` parameter, define the instance data directory within the file." ) if args.validation_steps and (args.validation_prompt or args.validation_negative_prompt): raise ValueError( "If you are using `concepts_list` parameter, define validation parameters for " "each subject within the file:\n - `validation_prompt`." "\n - `validation_negative_prompt`.\n - `validation_guidance_scale`." "\n - `validation_number_images`.\n - `validation_prompt`." "\n - `validation_inference_steps`.\nThe `validation_steps` parameter is the only one " "that needs to be defined outside the file." 
) env_local_rank = int(environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank if args.with_prior_preservation: if not args.concepts_list: if not args.class_data_dir: raise ValueError("You must specify a data directory for class images.") if not args.class_prompt: raise ValueError("You must specify prompt for class images.") else: if args.class_data_dir: raise ValueError( "If you are using `concepts_list` parameter, define the class data directory within the file." ) if args.class_prompt: raise ValueError( "If you are using `concepts_list` parameter, define the class prompt within the file." ) else: # logger is not available yet if not args.class_data_dir: warnings.warn( "Ignoring `class_data_dir` parameter, you need to use it together with `with_prior_preservation`." ) if not args.class_prompt: warnings.warn( "Ignoring `class_prompt` parameter, you need to use it together with `with_prior_preservation`." ) return args class DreamBoothDataset(Dataset): """ A dataset to prepare the instance and class images with the prompts for fine-tuning the model. It pre-processes the images and then tokenizes prompts. """ def __init__( self, instance_data_root, instance_prompt, tokenizer, class_data_root=None, class_prompt=None, size=512, center_crop=False, ): self.size = size self.center_crop = center_crop self.tokenizer = tokenizer self.instance_data_root = [] self.instance_images_path = [] self.num_instance_images = [] self.instance_prompt = [] self.class_data_root = [] if class_data_root is not None else None self.class_images_path = [] self.num_class_images = [] self.class_prompt = [] self._length = 0 for i in range(len(instance_data_root)): self.instance_data_root.append(Path(instance_data_root[i])) if not self.instance_data_root[i].exists(): raise ValueError("Instance images root doesn't exists.") self.instance_images_path.append(list(Path(instance_data_root[i]).iterdir())) self.num_instance_images.append(len(self.instance_images_path[i])) self.instance_prompt.append(instance_prompt[i]) self._length += self.num_instance_images[i] if class_data_root is not None: self.class_data_root.append(Path(class_data_root[i])) self.class_data_root[i].mkdir(parents=True, exist_ok=True) self.class_images_path.append(list(self.class_data_root[i].iterdir())) self.num_class_images.append(len(self.class_images_path)) if self.num_class_images[i] > self.num_instance_images[i]: self._length -= self.num_instance_images[i] self._length += self.num_class_images[i] self.class_prompt.append(class_prompt[i]) self.image_transforms = transforms.Compose( [ transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR), transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def __len__(self): return self._length def __getitem__(self, index): example = {} for i in range(len(self.instance_images_path)): instance_image = Image.open(self.instance_images_path[i][index % self.num_instance_images[i]]) if not instance_image.mode == "RGB": instance_image = instance_image.convert("RGB") example[f"instance_images_{i}"] = self.image_transforms(instance_image) example[f"instance_prompt_ids_{i}"] = self.tokenizer( self.instance_prompt[i], truncation=True, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", ).input_ids if self.class_data_root: for i in range(len(self.class_data_root)): class_image = Image.open(self.class_images_path[i][index 
% self.num_class_images[i]]) if not class_image.mode == "RGB": class_image = class_image.convert("RGB") example[f"class_images_{i}"] = self.image_transforms(class_image) example[f"class_prompt_ids_{i}"] = self.tokenizer( self.class_prompt[i], truncation=True, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", ).input_ids return example def collate_fn(num_instances, examples, with_prior_preservation=False): input_ids = [] pixel_values = [] for i in range(num_instances): input_ids += [example[f"instance_prompt_ids_{i}"] for example in examples] pixel_values += [example[f"instance_images_{i}"] for example in examples] # Concat class and instance examples for prior preservation. # We do this to avoid doing two forward passes. if with_prior_preservation: for i in range(num_instances): input_ids += [example[f"class_prompt_ids_{i}"] for example in examples] pixel_values += [example[f"class_images_{i}"] for example in examples] pixel_values = torch.stack(pixel_values) pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() input_ids = torch.cat(input_ids, dim=0) batch = { "input_ids": input_ids, "pixel_values": pixel_values, } return batch class PromptDataset(Dataset): """A simple dataset to prepare the prompts to generate class images on multiple GPUs.""" def __init__(self, prompt, num_samples): self.prompt = prompt self.num_samples = num_samples def __len__(self): return self.num_samples def __getitem__(self, index): example = {} example["prompt"] = self.prompt example["index"] = index return example def main(args): logging_dir = Path(args.output_dir, args.logging_dir) accelerator_project_config = ProjectConfiguration( total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir ) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_config=accelerator_project_config, ) if args.report_to == "wandb": if not is_wandb_available(): raise ImportError("Make sure to install wandb if you want to use it for logging during training.") # Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate # This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models. # TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate. if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1: raise ValueError( "Gradient accumulation is not supported when training the text encoder in distributed training. " "Please set gradient_accumulation_steps to 1. This feature will be supported in the future." 
) instance_data_dir = [] instance_prompt = [] class_data_dir = [] if args.with_prior_preservation else None class_prompt = [] if args.with_prior_preservation else None if args.concepts_list: with open(args.concepts_list, "r") as f: concepts_list = json.load(f) if args.validation_steps: args.validation_prompt = [] args.validation_number_images = [] args.validation_negative_prompt = [] args.validation_inference_steps = [] args.validation_guidance_scale = [] for concept in concepts_list: instance_data_dir.append(concept["instance_data_dir"]) instance_prompt.append(concept["instance_prompt"]) if args.with_prior_preservation: try: class_data_dir.append(concept["class_data_dir"]) class_prompt.append(concept["class_prompt"]) except KeyError: raise KeyError( "`class_data_dir` or `class_prompt` not found in concepts_list while using " "`with_prior_preservation`." ) else: if "class_data_dir" in concept: warnings.warn( "Ignoring `class_data_dir` key, to use it you need to enable `with_prior_preservation`." ) if "class_prompt" in concept: warnings.warn( "Ignoring `class_prompt` key, to use it you need to enable `with_prior_preservation`." ) if args.validation_steps: args.validation_prompt.append(concept.get("validation_prompt", None)) args.validation_number_images.append(concept.get("validation_number_images", 4)) args.validation_negative_prompt.append(concept.get("validation_negative_prompt", None)) args.validation_inference_steps.append(concept.get("validation_inference_steps", 25)) args.validation_guidance_scale.append(concept.get("validation_guidance_scale", 7.5)) else: # Parse instance and class inputs, and double check that lengths match instance_data_dir = args.instance_data_dir.split(",") instance_prompt = args.instance_prompt.split(",") assert all( x == len(instance_data_dir) for x in [len(instance_data_dir), len(instance_prompt)] ), "Instance data dir and prompt inputs are not of the same length." if args.with_prior_preservation: class_data_dir = args.class_data_dir.split(",") class_prompt = args.class_prompt.split(",") assert all( x == len(instance_data_dir) for x in [len(instance_data_dir), len(instance_prompt), len(class_data_dir), len(class_prompt)] ), "Instance & class data dir or prompt inputs are not of the same length." if args.validation_steps: validation_prompts = args.validation_prompt.split(",") num_of_validation_prompts = len(validation_prompts) args.validation_prompt = validation_prompts args.validation_number_images = [args.validation_number_images] * num_of_validation_prompts negative_validation_prompts = [None] * num_of_validation_prompts if args.validation_negative_prompt: negative_validation_prompts = args.validation_negative_prompt.split(",") while len(negative_validation_prompts) < num_of_validation_prompts: negative_validation_prompts.append(None) args.validation_negative_prompt = negative_validation_prompts assert num_of_validation_prompts == len( negative_validation_prompts ), "The length of negative prompts for validation is greater than the number of validation prompts." args.validation_inference_steps = [args.validation_inference_steps] * num_of_validation_prompts args.validation_guidance_scale = [args.validation_guidance_scale] * num_of_validation_prompts # Make one log on every process with the configuration for debugging. 
logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Generate class images if prior preservation is enabled. if args.with_prior_preservation: for i in range(len(class_data_dir)): class_images_dir = Path(class_data_dir[i]) if not class_images_dir.exists(): class_images_dir.mkdir(parents=True) cur_class_images = len(list(class_images_dir.iterdir())) if cur_class_images < args.num_class_images: torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32 if args.prior_generation_precision == "fp32": torch_dtype = torch.float32 elif args.prior_generation_precision == "fp16": torch_dtype = torch.float16 elif args.prior_generation_precision == "bf16": torch_dtype = torch.bfloat16 pipeline = DiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, torch_dtype=torch_dtype, safety_checker=None, revision=args.revision, ) pipeline.set_progress_bar_config(disable=True) num_new_images = args.num_class_images - cur_class_images logger.info(f"Number of class images to sample: {num_new_images}.") sample_dataset = PromptDataset(class_prompt[i], num_new_images) sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size) sample_dataloader = accelerator.prepare(sample_dataloader) pipeline.to(accelerator.device) for example in tqdm( sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process ): images = pipeline(example["prompt"]).images for ii, image in enumerate(images): hash_image = hashlib.sha1(image.tobytes()).hexdigest() image_filename = ( class_images_dir / f"{example['index'][ii] + cur_class_images}-{hash_image}.jpg" ) image.save(image_filename) # Clean up the memory deleting one-time-use variables. 
del pipeline del sample_dataloader del sample_dataset if torch.cuda.is_available(): torch.cuda.empty_cache() # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id # Load the tokenizer tokenizer = None if args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, revision=args.revision, use_fast=False) elif args.pretrained_model_name_or_path: tokenizer = AutoTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision, use_fast=False, ) # import correct text encoder class text_encoder_cls = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision) # Load scheduler and models noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") text_encoder = text_encoder_cls.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision ) vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision) unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision ) vae.requires_grad_(False) if not args.train_text_encoder: text_encoder.requires_grad_(False) if args.enable_xformers_memory_efficient_attention: if is_xformers_available(): unet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. Make sure it is installed correctly") if args.gradient_checkpointing: unet.enable_gradient_checkpointing() if args.train_text_encoder: text_encoder.gradient_checkpointing_enable() # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." ) optimizer_class = bnb.optim.AdamW8bit else: optimizer_class = torch.optim.AdamW # Optimizer creation params_to_optimize = ( itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters() ) optimizer = optimizer_class( params_to_optimize, lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) # Dataset and DataLoaders creation: train_dataset = DreamBoothDataset( instance_data_root=instance_data_dir, instance_prompt=instance_prompt, class_data_root=class_data_dir, class_prompt=class_prompt, tokenizer=tokenizer, size=args.resolution, center_crop=args.center_crop, ) train_dataloader = torch.utils.data.DataLoader( train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=lambda examples: collate_fn(len(instance_data_dir), examples, args.with_prior_preservation), num_workers=1, ) # Scheduler and math around the number of training steps. 
overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes, num_training_steps=args.max_train_steps * accelerator.num_processes, num_cycles=args.lr_num_cycles, power=args.lr_power, ) # Prepare everything with our `accelerator`. if args.train_text_encoder: unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, text_encoder, optimizer, train_dataloader, lr_scheduler ) else: unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, optimizer, train_dataloader, lr_scheduler ) # For mixed precision training we cast the text_encoder and vae weights to half-precision # as these models are only used for inference, keeping weights in full precision is not required. weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Move vae and text_encoder to device and cast to weight_dtype vae.to(accelerator.device, dtype=weight_dtype) if not args.train_text_encoder: text_encoder.to(accelerator.device, dtype=weight_dtype) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initialize automatically on the main process. if accelerator.is_main_process: accelerator.init_trackers("dreambooth", config=vars(args)) # Train! total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num batches each epoch = {len(train_dataloader)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = basename(args.resume_from_checkpoint) else: # Get the mos recent checkpoint dirs = listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] if len(dirs) > 0 else None if path is None: accelerator.print( f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." 
) args.resume_from_checkpoint = None else: accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(join(args.output_dir, path)) global_step = int(path.split("-")[1]) resume_global_step = global_step * args.gradient_accumulation_steps first_epoch = global_step // num_update_steps_per_epoch resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps) # Only show the progress bar once on each machine. progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process) progress_bar.set_description("Steps") for epoch in range(first_epoch, args.num_train_epochs): unet.train() if args.train_text_encoder: text_encoder.train() for step, batch in enumerate(train_dataloader): # Skip steps until we reach the resumed step if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step: if step % args.gradient_accumulation_steps == 0: progress_bar.update(1) continue with accelerator.accumulate(unet): # Convert images to latent space latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample() latents = latents * vae.config.scaling_factor # Sample noise that we'll add to the latents noise = torch.randn_like(latents) bsz = latents.shape[0] # Sample a random timestep for each image time_steps = torch.randint( 0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device ) time_steps = time_steps.long() # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) noisy_latents = noise_scheduler.add_noise(latents, noise, time_steps) # Get the text embedding for conditioning encoder_hidden_states = text_encoder(batch["input_ids"])[0] # Predict the noise residual model_pred = unet(noisy_latents, time_steps, encoder_hidden_states).sample # Get the target for loss depending on the prediction type if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(latents, noise, time_steps) else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") if args.with_prior_preservation: # Chunk the noise and model_pred into two parts and compute the loss on each part separately. model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0) target, target_prior = torch.chunk(target, 2, dim=0) # Compute instance loss loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") # Compute prior loss prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean") # Add the prior loss to the instance loss. 
loss = loss + args.prior_loss_weight * prior_loss else: loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") accelerator.backward(loss) if accelerator.sync_gradients: params_to_clip = ( itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters() ) accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad(set_to_none=args.set_grads_to_none) # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) global_step += 1 if accelerator.is_main_process: if global_step % args.checkpointing_steps == 0: save_path = join(args.output_dir, f"checkpoint-{global_step}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") if ( args.validation_steps and any(args.validation_prompt) and global_step % args.validation_steps == 0 ): images_set = generate_validation_images( text_encoder, tokenizer, unet, vae, args, accelerator, weight_dtype ) for images, validation_prompt in zip(images_set, args.validation_prompt): if len(images) > 0: label = str(uuid.uuid1())[:8] # generate an id for different set of images log_validation_images_to_tracker( images, label, validation_prompt, accelerator, global_step ) logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) accelerator.log(logs, step=global_step) if global_step >= args.max_train_steps: break # Create the pipeline using the trained modules and save it. accelerator.wait_for_everyone() if accelerator.is_main_process: pipeline = DiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, unet=accelerator.unwrap_model(unet), text_encoder=accelerator.unwrap_model(text_encoder), revision=args.revision, ) pipeline.save_pretrained(args.output_dir) if args.push_to_hub: upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) accelerator.end_training() if __name__ == "__main__": args = parse_args() main(args)
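As a usage note for the multi-subject script above: when `--concepts_list` is passed, `main()` reads a JSON list of per-subject dictionaries with the keys `instance_data_dir`, `instance_prompt`, and, when prior preservation or validation is enabled, `class_data_dir`, `class_prompt`, and the `validation_*` keys. The sketch below simply writes such a file; the paths, prompts, and identifiers are placeholders, and per `parse_args`, the file is passed via `--concepts_list` instead of `--instance_data_dir`/`--instance_prompt`.

```python
# Hedged sketch of a concepts_list JSON for the multi-subject script above.
# The keys mirror what main() reads; paths, prompts, and identifiers are placeholders.
import json

concepts_list = [
    {
        "instance_prompt": "a photo of sks dog",
        "instance_data_dir": "path-to-dog-instance-images",
        "class_prompt": "a photo of dog",              # used only with --with_prior_preservation
        "class_data_dir": "path-to-dog-class-images",  # used only with --with_prior_preservation
        "validation_prompt": "a photo of sks dog in a bucket",  # used only with --validation_steps
        "validation_negative_prompt": "blurry, low quality",
        "validation_number_images": 4,
        "validation_inference_steps": 25,
        "validation_guidance_scale": 7.5,
    },
    {
        "instance_prompt": "a photo of ohwx cat",
        "instance_data_dir": "path-to-cat-instance-images",
        "class_prompt": "a photo of cat",
        "class_data_dir": "path-to-cat-class-images",
        "validation_prompt": "a photo of ohwx cat on a sofa",
    },
]

with open("concepts_list.json", "w") as f:
    json.dump(concepts_list, f, indent=4)
```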
0
hf_public_repos/diffusers/examples/research_projects
hf_public_repos/diffusers/examples/research_projects/colossalai/README.md
# [DreamBooth](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth) by [ColossalAI](https://github.com/hpcaitech/ColossalAI.git)

[DreamBooth](https://arxiv.org/abs/2208.12242) is a method to personalize text-to-image models like Stable Diffusion given just a few (3-5) images of a subject. The `train_dreambooth_colossalai.py` script shows how to implement the training procedure and adapt it for Stable Diffusion.

By accommodating model data in CPU and GPU memory and moving data to the computing device only when necessary, [Gemini](https://www.colossalai.org/docs/advanced_tutorials/meet_gemini), the heterogeneous memory manager of [Colossal-AI](https://github.com/hpcaitech/ColossalAI), can break through the GPU memory wall by using GPU and CPU memory (composed of CPU DRAM or NVMe SSD memory) together at the same time. Moreover, the model scale can be further increased by combining heterogeneous training with other parallel approaches, such as data parallelism, tensor parallelism, and pipeline parallelism.

## Installing the dependencies

Before running the scripts, make sure to install the library's training dependencies:

```bash
pip install -r requirements.txt
```

## Install [ColossalAI](https://github.com/hpcaitech/ColossalAI.git)

**From PyPI**

```bash
pip install colossalai
```

**From source**

```bash
git clone https://github.com/hpcaitech/ColossalAI.git
cd ColossalAI

# install colossalai
pip install .
```

## Dataset for Teyvat BLIP captions

Dataset used to train the [Teyvat characters text-to-image model](https://github.com/hpcaitech/ColossalAI/tree/main/examples/images/diffusion).

BLIP generated captions for character images from the [genshin-impact fandom wiki](https://genshin-impact.fandom.com/wiki/Character#Playable_Characters) and the [biligame wiki for genshin impact](https://wiki.biligame.com/ys/%E8%A7%92%E8%89%B2).

For each row the dataset contains `image` and `text` keys. `image` is a varying-size PIL png, and `text` is the accompanying text caption. Only a train split is provided.

The `text` includes the tags `Teyvat`, `Name`, `Element`, `Weapon`, `Region`, `Model type`, and `Description`; the `Description` is captioned with the [pre-trained BLIP model](https://github.com/salesforce/BLIP).

## Training

The `placement` argument can be `cpu`, `auto`, or `cuda`. With `cpu`, the required GPU RAM can be reduced to about 4 GB at the cost of slower training; with `cuda`, GPU memory is roughly halved while training stays fast; with `auto`, a more balanced trade-off between speed and memory is obtained.

**___Note: Change the `resolution` to 768 if you are using the [stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) 768x768 model.___**

```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export INSTANCE_DIR="path-to-instance-images"
export OUTPUT_DIR="path-to-save-model"

torchrun --nproc_per_node 2 train_dreambooth_colossalai.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --output_dir=$OUTPUT_DIR \
  --instance_prompt="a photo of sks dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --learning_rate=5e-6 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --max_train_steps=400 \
  --placement="cuda"
```

### Training with prior-preservation loss

Prior preservation is used to avoid overfitting and language drift. Refer to the paper to learn more about it. For prior preservation, we first generate images using the model with a class prompt and then use those during training along with our data.
According to the paper, it's recommended to generate `num_epochs * num_samples` images for prior preservation. 200-300 works well for most cases. The `num_class_images` flag sets the number of images to generate with the class prompt. You can place existing images in `class_data_dir`, and the training script will generate any additional images so that `num_class_images` are present in `class_data_dir` at training time.

```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export INSTANCE_DIR="path-to-instance-images"
export CLASS_DIR="path-to-class-images"
export OUTPUT_DIR="path-to-save-model"

torchrun --nproc_per_node 2 train_dreambooth_colossalai.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --class_data_dir=$CLASS_DIR \
  --output_dir=$OUTPUT_DIR \
  --with_prior_preservation --prior_loss_weight=1.0 \
  --instance_prompt="a photo of sks dog" \
  --class_prompt="a photo of dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --learning_rate=5e-6 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --max_train_steps=800 \
  --placement="cuda"
```

## Inference

Once you have trained a model using the above command, inference can be done simply using the `StableDiffusionPipeline`. Make sure to include the `identifier` (e.g. `sks` in the above example) in your prompt.

```python
from diffusers import StableDiffusionPipeline
import torch

model_id = "path-to-save-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
```
0
hf_public_repos/diffusers/examples/research_projects
hf_public_repos/diffusers/examples/research_projects/colossalai/train_dreambooth_colossalai.py
import argparse import hashlib import math import os from pathlib import Path import colossalai import torch import torch.nn.functional as F import torch.utils.checkpoint from colossalai.context.parallel_mode import ParallelMode from colossalai.core import global_context as gpc from colossalai.logging import disable_existing_loggers, get_dist_logger from colossalai.nn.optimizer.gemini_optimizer import GeminiAdamOptimizer from colossalai.nn.parallel.utils import get_static_torch_model from colossalai.utils import get_current_device from colossalai.utils.model.colo_init_context import ColoInitContext from huggingface_hub import create_repo, upload_folder from PIL import Image from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import AutoTokenizer, PretrainedConfig from diffusers import AutoencoderKL, DDPMScheduler, DiffusionPipeline, UNet2DConditionModel from diffusers.optimization import get_scheduler disable_existing_loggers() logger = get_dist_logger() def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str): text_encoder_config = PretrainedConfig.from_pretrained( pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, ) model_class = text_encoder_config.architectures[0] if model_class == "CLIPTextModel": from transformers import CLIPTextModel return CLIPTextModel elif model_class == "RobertaSeriesModelWithTransformation": from diffusers.pipelines.alt_diffusion.modeling_roberta_series import RobertaSeriesModelWithTransformation return RobertaSeriesModelWithTransformation else: raise ValueError(f"{model_class} is not supported.") def parse_args(input_args=None): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--instance_data_dir", type=str, default=None, required=True, help="A folder containing the training data of instance images.", ) parser.add_argument( "--class_data_dir", type=str, default=None, required=False, help="A folder containing the training data of class images.", ) parser.add_argument( "--instance_prompt", type=str, default="a photo of sks dog", required=False, help="The prompt with identifier specifying the instance", ) parser.add_argument( "--class_prompt", type=str, default=None, help="The prompt to specify images in the same class as provided instance images.", ) parser.add_argument( "--with_prior_preservation", default=False, action="store_true", help="Flag to add prior preservation loss.", ) parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.") parser.add_argument( "--num_class_images", type=int, default=100, help=( "Minimal class images for prior preservation loss. If there are not enough images already present in" " class_data_dir, additional images will be sampled with class_prompt." 
), ) parser.add_argument( "--output_dir", type=str, default="text-inversion-model", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--placement", type=str, default="cpu", help="Placement Policy for Gemini. Valid when using colossalai as dist plan.", ) parser.add_argument( "--center_crop", default=False, action="store_true", help=( "Whether to center crop the input images to the resolution. If not set, the images will be randomly" " cropped. The images will be resized to the resolution first before cropping." ), ) parser.add_argument( "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader." ) parser.add_argument( "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images." ) parser.add_argument("--num_train_epochs", type=int, default=1) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.") parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--learning_rate", type=float, default=5e-6, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." ) parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." 
), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") if input_args is not None: args = parser.parse_args(input_args) else: args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank if args.with_prior_preservation: if args.class_data_dir is None: raise ValueError("You must specify a data directory for class images.") if args.class_prompt is None: raise ValueError("You must specify prompt for class images.") else: if args.class_data_dir is not None: logger.warning("You need not use --class_data_dir without --with_prior_preservation.") if args.class_prompt is not None: logger.warning("You need not use --class_prompt without --with_prior_preservation.") return args class DreamBoothDataset(Dataset): """ A dataset to prepare the instance and class images with the prompts for fine-tuning the model. It pre-processes the images and the tokenizes prompts. """ def __init__( self, instance_data_root, instance_prompt, tokenizer, class_data_root=None, class_prompt=None, size=512, center_crop=False, ): self.size = size self.center_crop = center_crop self.tokenizer = tokenizer self.instance_data_root = Path(instance_data_root) if not self.instance_data_root.exists(): raise ValueError("Instance images root doesn't exists.") self.instance_images_path = list(Path(instance_data_root).iterdir()) self.num_instance_images = len(self.instance_images_path) self.instance_prompt = instance_prompt self._length = self.num_instance_images if class_data_root is not None: self.class_data_root = Path(class_data_root) self.class_data_root.mkdir(parents=True, exist_ok=True) self.class_images_path = list(self.class_data_root.iterdir()) self.num_class_images = len(self.class_images_path) self._length = max(self.num_class_images, self.num_instance_images) self.class_prompt = class_prompt else: self.class_data_root = None self.image_transforms = transforms.Compose( [ transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR), transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def __len__(self): return self._length def __getitem__(self, index): example = {} instance_image = Image.open(self.instance_images_path[index % self.num_instance_images]) if not instance_image.mode == "RGB": instance_image = instance_image.convert("RGB") example["instance_images"] = self.image_transforms(instance_image) example["instance_prompt_ids"] = self.tokenizer( self.instance_prompt, padding="do_not_pad", truncation=True, max_length=self.tokenizer.model_max_length, ).input_ids if self.class_data_root: class_image = Image.open(self.class_images_path[index % self.num_class_images]) if not class_image.mode == "RGB": class_image = class_image.convert("RGB") example["class_images"] = self.image_transforms(class_image) example["class_prompt_ids"] = self.tokenizer( self.class_prompt, padding="do_not_pad", truncation=True, max_length=self.tokenizer.model_max_length, ).input_ids return example class PromptDataset(Dataset): "A simple dataset to prepare the prompts to generate class images on multiple GPUs." 
def __init__(self, prompt, num_samples): self.prompt = prompt self.num_samples = num_samples def __len__(self): return self.num_samples def __getitem__(self, index): example = {} example["prompt"] = self.prompt example["index"] = index return example # Gemini + ZeRO DDP def gemini_zero_dpp(model: torch.nn.Module, placememt_policy: str = "auto"): from colossalai.nn.parallel import GeminiDDP model = GeminiDDP( model, device=get_current_device(), placement_policy=placememt_policy, pin_memory=True, search_range_mb=64 ) return model def main(args): if args.seed is None: colossalai.launch_from_torch(config={}) else: colossalai.launch_from_torch(config={}, seed=args.seed) local_rank = gpc.get_local_rank(ParallelMode.DATA) world_size = gpc.get_world_size(ParallelMode.DATA) if args.with_prior_preservation: class_images_dir = Path(args.class_data_dir) if not class_images_dir.exists(): class_images_dir.mkdir(parents=True) cur_class_images = len(list(class_images_dir.iterdir())) if cur_class_images < args.num_class_images: torch_dtype = torch.float16 if get_current_device() == "cuda" else torch.float32 pipeline = DiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, torch_dtype=torch_dtype, safety_checker=None, revision=args.revision, ) pipeline.set_progress_bar_config(disable=True) num_new_images = args.num_class_images - cur_class_images logger.info(f"Number of class images to sample: {num_new_images}.") sample_dataset = PromptDataset(args.class_prompt, num_new_images) sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size) pipeline.to(get_current_device()) for example in tqdm( sample_dataloader, desc="Generating class images", disable=not local_rank == 0, ): images = pipeline(example["prompt"]).images for i, image in enumerate(images): hash_image = hashlib.sha1(image.tobytes()).hexdigest() image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg" image.save(image_filename) del pipeline # Handle the repository creation if local_rank == 0: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id # Load the tokenizer if args.tokenizer_name: logger.info(f"Loading tokenizer from {args.tokenizer_name}", ranks=[0]) tokenizer = AutoTokenizer.from_pretrained( args.tokenizer_name, revision=args.revision, use_fast=False, ) elif args.pretrained_model_name_or_path: logger.info("Loading tokenizer from pretrained model", ranks=[0]) tokenizer = AutoTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision, use_fast=False, ) # import correct text encoder class text_encoder_cls = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path) # Load models and create wrapper for stable diffusion logger.info(f"Loading text_encoder from {args.pretrained_model_name_or_path}", ranks=[0]) text_encoder = text_encoder_cls.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, ) logger.info(f"Loading AutoencoderKL from {args.pretrained_model_name_or_path}", ranks=[0]) vae = AutoencoderKL.from_pretrained( args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision, ) logger.info(f"Loading UNet2DConditionModel from {args.pretrained_model_name_or_path}", ranks=[0]) with ColoInitContext(device=get_current_device()): unet = 
UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, low_cpu_mem_usage=False ) vae.requires_grad_(False) text_encoder.requires_grad_(False) if args.gradient_checkpointing: unet.enable_gradient_checkpointing() if args.scale_lr: args.learning_rate = args.learning_rate * args.train_batch_size * world_size unet = gemini_zero_dpp(unet, args.placement) # config optimizer for colossalai zero optimizer = GeminiAdamOptimizer( unet, lr=args.learning_rate, initial_scale=2**5, clipping_norm=args.max_grad_norm ) # load noise_scheduler noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") # prepare dataset logger.info(f"Prepare dataset from {args.instance_data_dir}", ranks=[0]) train_dataset = DreamBoothDataset( instance_data_root=args.instance_data_dir, instance_prompt=args.instance_prompt, class_data_root=args.class_data_dir if args.with_prior_preservation else None, class_prompt=args.class_prompt, tokenizer=tokenizer, size=args.resolution, center_crop=args.center_crop, ) def collate_fn(examples): input_ids = [example["instance_prompt_ids"] for example in examples] pixel_values = [example["instance_images"] for example in examples] # Concat class and instance examples for prior preservation. # We do this to avoid doing two forward passes. if args.with_prior_preservation: input_ids += [example["class_prompt_ids"] for example in examples] pixel_values += [example["class_images"] for example in examples] pixel_values = torch.stack(pixel_values) pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() input_ids = tokenizer.pad( {"input_ids": input_ids}, padding="max_length", max_length=tokenizer.model_max_length, return_tensors="pt", ).input_ids batch = { "input_ids": input_ids, "pixel_values": pixel_values, } return batch train_dataloader = torch.utils.data.DataLoader( train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn, num_workers=1 ) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader)) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps, ) weight_dtype = torch.float32 if args.mixed_precision == "fp16": weight_dtype = torch.float16 elif args.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Move text_encode and vae to gpu. # For mixed precision training we cast the text_encoder and vae weights to half-precision # as these models are only used for inference, keeping weights in full precision is not required. vae.to(get_current_device(), dtype=weight_dtype) text_encoder.to(get_current_device(), dtype=weight_dtype) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader)) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # Train! 
total_batch_size = args.train_batch_size * world_size logger.info("***** Running training *****", ranks=[0]) logger.info(f" Num examples = {len(train_dataset)}", ranks=[0]) logger.info(f" Num batches each epoch = {len(train_dataloader)}", ranks=[0]) logger.info(f" Num Epochs = {args.num_train_epochs}", ranks=[0]) logger.info(f" Instantaneous batch size per device = {args.train_batch_size}", ranks=[0]) logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}", ranks=[0]) logger.info(f" Total optimization steps = {args.max_train_steps}", ranks=[0]) # Only show the progress bar once on each machine. progress_bar = tqdm(range(args.max_train_steps), disable=not local_rank == 0) progress_bar.set_description("Steps") global_step = 0 torch.cuda.synchronize() for epoch in range(args.num_train_epochs): unet.train() for step, batch in enumerate(train_dataloader): torch.cuda.reset_peak_memory_stats() # Move batch to gpu for key, value in batch.items(): batch[key] = value.to(get_current_device(), non_blocking=True) # Convert images to latent space optimizer.zero_grad() latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample() latents = latents * 0.18215 # Sample noise that we'll add to the latents noise = torch.randn_like(latents) bsz = latents.shape[0] # Sample a random timestep for each image timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device) timesteps = timesteps.long() # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) # Get the text embedding for conditioning encoder_hidden_states = text_encoder(batch["input_ids"])[0] # Predict the noise residual model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample # Get the target for loss depending on the prediction type if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(latents, noise, timesteps) else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") if args.with_prior_preservation: # Chunk the noise and model_pred into two parts and compute the loss on each part separately. model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0) target, target_prior = torch.chunk(target, 2, dim=0) # Compute instance loss loss = F.mse_loss(model_pred.float(), target.float(), reduction="none").mean([1, 2, 3]).mean() # Compute prior loss prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean") # Add the prior loss to the instance loss. 
loss = loss + args.prior_loss_weight * prior_loss else: loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") optimizer.backward(loss) optimizer.step() lr_scheduler.step() logger.info(f"max GPU_mem cost is {torch.cuda.max_memory_allocated()/2**20} MB", ranks=[0]) # Checks if the accelerator has performed an optimization step behind the scenes progress_bar.update(1) global_step += 1 logs = { "loss": loss.detach().item(), "lr": optimizer.param_groups[0]["lr"], } # lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) if global_step % args.save_steps == 0: torch.cuda.synchronize() torch_unet = get_static_torch_model(unet) if local_rank == 0: pipeline = DiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, unet=torch_unet, revision=args.revision, ) save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") pipeline.save_pretrained(save_path) logger.info(f"Saving model checkpoint to {save_path}", ranks=[0]) if global_step >= args.max_train_steps: break torch.cuda.synchronize() unet = get_static_torch_model(unet) if local_rank == 0: pipeline = DiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, unet=unet, revision=args.revision, ) pipeline.save_pretrained(args.output_dir) logger.info(f"Saving model checkpoint to {args.output_dir}", ranks=[0]) if args.push_to_hub: upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) if __name__ == "__main__": args = parse_args() main(args)
0
hf_public_repos/diffusers/examples/research_projects
hf_public_repos/diffusers/examples/research_projects/colossalai/requirement.txt
diffusers
torch
torchvision
ftfy
tensorboard
Jinja2
transformers
0
hf_public_repos/diffusers/examples/research_projects
hf_public_repos/diffusers/examples/research_projects/colossalai/inference.py
import torch

from diffusers import StableDiffusionPipeline


model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
0
hf_public_repos/diffusers/examples/research_projects
hf_public_repos/diffusers/examples/research_projects/dreambooth_inpaint/requirements.txt
diffusers==0.9.0
accelerate>=0.16.0
torchvision
transformers>=4.21.0
ftfy
tensorboard
Jinja2
0
hf_public_repos/diffusers/examples/research_projects
hf_public_repos/diffusers/examples/research_projects/dreambooth_inpaint/README.md
# Dreambooth for the inpainting model

This script was added by @thedarkzeno.

Please note that this script is not actively maintained; you can, however, open an issue and tag @thedarkzeno or @patil-suraj.

To launch a simple fine-tuning run on your instance images:

```bash
export MODEL_NAME="runwayml/stable-diffusion-inpainting"
export INSTANCE_DIR="path-to-instance-images"
export OUTPUT_DIR="path-to-save-model"

accelerate launch train_dreambooth_inpaint.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --output_dir=$OUTPUT_DIR \
  --instance_prompt="a photo of sks dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=1 \
  --learning_rate=5e-6 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --max_train_steps=400
```

### Training with prior-preservation loss

Prior-preservation is used to avoid overfitting and language drift. Refer to the paper to learn more about it. For prior-preservation we first generate images using the model with a class prompt and then use those during training along with our data. According to the paper, it's recommended to generate `num_epochs * num_samples` images for prior-preservation; 200-300 works well for most cases. (See the short sketch at the end of this README for how the prior term enters the training objective.)

```bash
export MODEL_NAME="runwayml/stable-diffusion-inpainting"
export INSTANCE_DIR="path-to-instance-images"
export CLASS_DIR="path-to-class-images"
export OUTPUT_DIR="path-to-save-model"

accelerate launch train_dreambooth_inpaint.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --class_data_dir=$CLASS_DIR \
  --output_dir=$OUTPUT_DIR \
  --with_prior_preservation --prior_loss_weight=1.0 \
  --instance_prompt="a photo of sks dog" \
  --class_prompt="a photo of dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=1 \
  --learning_rate=5e-6 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --num_class_images=200 \
  --max_train_steps=800
```

### Training with gradient checkpointing and 8-bit optimizer

With the help of gradient checkpointing and the 8-bit optimizer from bitsandbytes, it's possible to train DreamBooth on a 16GB GPU.

To install `bitsandbytes`, please refer to this [readme](https://github.com/TimDettmers/bitsandbytes#requirements--installation).

```bash
export MODEL_NAME="runwayml/stable-diffusion-inpainting"
export INSTANCE_DIR="path-to-instance-images"
export CLASS_DIR="path-to-class-images"
export OUTPUT_DIR="path-to-save-model"

accelerate launch train_dreambooth_inpaint.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --class_data_dir=$CLASS_DIR \
  --output_dir=$OUTPUT_DIR \
  --with_prior_preservation --prior_loss_weight=1.0 \
  --instance_prompt="a photo of sks dog" \
  --class_prompt="a photo of dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=2 --gradient_checkpointing \
  --use_8bit_adam \
  --learning_rate=5e-6 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --num_class_images=200 \
  --max_train_steps=800
```

### Fine-tune text encoder with the UNet.

The script also allows you to fine-tune the `text_encoder` along with the `unet`. It's been observed experimentally that fine-tuning `text_encoder` gives much better results, especially on faces. Pass the `--train_text_encoder` argument to the script to enable training `text_encoder`.

___Note: Training the text encoder requires more memory; with this option the training won't fit on a 16GB GPU. It needs at least 24GB VRAM.___

```bash
export MODEL_NAME="runwayml/stable-diffusion-inpainting"
export INSTANCE_DIR="path-to-instance-images"
export CLASS_DIR="path-to-class-images"
export OUTPUT_DIR="path-to-save-model"

accelerate launch train_dreambooth_inpaint.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --train_text_encoder \
  --instance_data_dir=$INSTANCE_DIR \
  --class_data_dir=$CLASS_DIR \
  --output_dir=$OUTPUT_DIR \
  --with_prior_preservation --prior_loss_weight=1.0 \
  --instance_prompt="a photo of sks dog" \
  --class_prompt="a photo of dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --use_8bit_adam \
  --gradient_checkpointing \
  --learning_rate=2e-6 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --num_class_images=200 \
  --max_train_steps=800
```
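
### How the prior-preservation loss enters the objective

The training script combines the instance loss and the class (prior) loss into a single objective, weighted by `--prior_loss_weight`. Below is a minimal, self-contained sketch of that combination; the random tensors stand in for the UNet prediction and the noise (or velocity) target, and the shapes are only illustrative.

```python
import torch
import torch.nn.functional as F

prior_loss_weight = 1.0  # value passed via --prior_loss_weight

# The collate_fn concatenates instance and class examples along the batch
# dimension, so prediction and target can be split back into two halves.
model_pred = torch.randn(4, 4, 64, 64)  # stand-in for the UNet output
target = torch.randn(4, 4, 64, 64)      # stand-in for the training target

model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0)
target, target_prior = torch.chunk(target, 2, dim=0)

# Instance loss: per-example MSE, then averaged over the batch.
instance_loss = F.mse_loss(model_pred.float(), target.float(), reduction="none").mean([1, 2, 3]).mean()
# Prior-preservation loss: plain mean MSE over the class examples.
prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean")

loss = instance_loss + prior_loss_weight * prior_loss
```

This mirrors the `with_prior_preservation` branch of `train_dreambooth_inpaint.py`; without prior preservation, the loss is simply the mean MSE between the prediction and the target.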
0
hf_public_repos/diffusers/examples/research_projects
hf_public_repos/diffusers/examples/research_projects/dreambooth_inpaint/train_dreambooth_inpaint.py
import argparse import hashlib import itertools import math import os import random from pathlib import Path import numpy as np import torch import torch.nn.functional as F import torch.utils.checkpoint from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import ProjectConfiguration, set_seed from huggingface_hub import create_repo, upload_folder from PIL import Image, ImageDraw from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDPMScheduler, StableDiffusionInpaintPipeline, StableDiffusionPipeline, UNet2DConditionModel, ) from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.13.0.dev0") logger = get_logger(__name__) def prepare_mask_and_masked_image(image, mask): image = np.array(image.convert("RGB")) image = image[None].transpose(0, 3, 1, 2) image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 mask = np.array(mask.convert("L")) mask = mask.astype(np.float32) / 255.0 mask = mask[None, None] mask[mask < 0.5] = 0 mask[mask >= 0.5] = 1 mask = torch.from_numpy(mask) masked_image = image * (mask < 0.5) return mask, masked_image # generate random masks def random_mask(im_shape, ratio=1, mask_full_image=False): mask = Image.new("L", im_shape, 0) draw = ImageDraw.Draw(mask) size = (random.randint(0, int(im_shape[0] * ratio)), random.randint(0, int(im_shape[1] * ratio))) # use this to always mask the whole image if mask_full_image: size = (int(im_shape[0] * ratio), int(im_shape[1] * ratio)) limits = (im_shape[0] - size[0] // 2, im_shape[1] - size[1] // 2) center = (random.randint(size[0] // 2, limits[0]), random.randint(size[1] // 2, limits[1])) draw_type = random.randint(0, 1) if draw_type == 0 or mask_full_image: draw.rectangle( (center[0] - size[0] // 2, center[1] - size[1] // 2, center[0] + size[0] // 2, center[1] + size[1] // 2), fill=255, ) else: draw.ellipse( (center[0] - size[0] // 2, center[1] - size[1] // 2, center[0] + size[0] // 2, center[1] + size[1] // 2), fill=255, ) return mask def parse_args(): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--instance_data_dir", type=str, default=None, required=True, help="A folder containing the training data of instance images.", ) parser.add_argument( "--class_data_dir", type=str, default=None, required=False, help="A folder containing the training data of class images.", ) parser.add_argument( "--instance_prompt", type=str, default=None, help="The prompt with identifier specifying the instance", ) parser.add_argument( "--class_prompt", type=str, default=None, help="The prompt to specify images in the same class as provided instance images.", ) parser.add_argument( "--with_prior_preservation", default=False, action="store_true", help="Flag to add prior preservation loss.", ) parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.") parser.add_argument( 
"--num_class_images", type=int, default=100, help=( "Minimal class images for prior preservation loss. If not have enough images, additional images will be" " sampled with class_prompt." ), ) parser.add_argument( "--output_dir", type=str, default="text-inversion-model", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--center_crop", default=False, action="store_true", help=( "Whether to center crop the input images to the resolution. If not set, the images will be randomly" " cropped. The images will be resized to the resolution first before cropping." ), ) parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder") parser.add_argument( "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader." ) parser.add_argument( "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images." ) parser.add_argument("--num_train_epochs", type=int, default=1) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--learning_rate", type=float, default=5e-6, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." 
) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--mixed_precision", type=str, default="no", choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final" " checkpoints in case they are better than the last checkpoint and are suitable for resuming training" " using `--resume_from_checkpoint`." ), ) parser.add_argument( "--checkpoints_total_limit", type=int, default=None, help=( "Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`." " See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state" " for more docs" ), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank if args.instance_data_dir is None: raise ValueError("You must specify a train data directory.") if args.with_prior_preservation: if args.class_data_dir is None: raise ValueError("You must specify a data directory for class images.") if args.class_prompt is None: raise ValueError("You must specify prompt for class images.") return args class DreamBoothDataset(Dataset): """ A dataset to prepare the instance and class images with the prompts for fine-tuning the model. It pre-processes the images and the tokenizes prompts. 
""" def __init__( self, instance_data_root, instance_prompt, tokenizer, class_data_root=None, class_prompt=None, size=512, center_crop=False, ): self.size = size self.center_crop = center_crop self.tokenizer = tokenizer self.instance_data_root = Path(instance_data_root) if not self.instance_data_root.exists(): raise ValueError("Instance images root doesn't exists.") self.instance_images_path = list(Path(instance_data_root).iterdir()) self.num_instance_images = len(self.instance_images_path) self.instance_prompt = instance_prompt self._length = self.num_instance_images if class_data_root is not None: self.class_data_root = Path(class_data_root) self.class_data_root.mkdir(parents=True, exist_ok=True) self.class_images_path = list(self.class_data_root.iterdir()) self.num_class_images = len(self.class_images_path) self._length = max(self.num_class_images, self.num_instance_images) self.class_prompt = class_prompt else: self.class_data_root = None self.image_transforms_resize_and_crop = transforms.Compose( [ transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR), transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size), ] ) self.image_transforms = transforms.Compose( [ transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def __len__(self): return self._length def __getitem__(self, index): example = {} instance_image = Image.open(self.instance_images_path[index % self.num_instance_images]) if not instance_image.mode == "RGB": instance_image = instance_image.convert("RGB") instance_image = self.image_transforms_resize_and_crop(instance_image) example["PIL_images"] = instance_image example["instance_images"] = self.image_transforms(instance_image) example["instance_prompt_ids"] = self.tokenizer( self.instance_prompt, padding="do_not_pad", truncation=True, max_length=self.tokenizer.model_max_length, ).input_ids if self.class_data_root: class_image = Image.open(self.class_images_path[index % self.num_class_images]) if not class_image.mode == "RGB": class_image = class_image.convert("RGB") class_image = self.image_transforms_resize_and_crop(class_image) example["class_images"] = self.image_transforms(class_image) example["class_PIL_images"] = class_image example["class_prompt_ids"] = self.tokenizer( self.class_prompt, padding="do_not_pad", truncation=True, max_length=self.tokenizer.model_max_length, ).input_ids return example class PromptDataset(Dataset): "A simple dataset to prepare the prompts to generate class images on multiple GPUs." def __init__(self, prompt, num_samples): self.prompt = prompt self.num_samples = num_samples def __len__(self): return self.num_samples def __getitem__(self, index): example = {} example["prompt"] = self.prompt example["index"] = index return example def main(): args = parse_args() logging_dir = Path(args.output_dir, args.logging_dir) project_config = ProjectConfiguration( total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir ) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with="tensorboard", project_config=project_config, ) # Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate # This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models. # TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate. 
if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1: raise ValueError( "Gradient accumulation is not supported when training the text encoder in distributed training. " "Please set gradient_accumulation_steps to 1. This feature will be supported in the future." ) if args.seed is not None: set_seed(args.seed) if args.with_prior_preservation: class_images_dir = Path(args.class_data_dir) if not class_images_dir.exists(): class_images_dir.mkdir(parents=True) cur_class_images = len(list(class_images_dir.iterdir())) if cur_class_images < args.num_class_images: torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32 pipeline = StableDiffusionInpaintPipeline.from_pretrained( args.pretrained_model_name_or_path, torch_dtype=torch_dtype, safety_checker=None ) pipeline.set_progress_bar_config(disable=True) num_new_images = args.num_class_images - cur_class_images logger.info(f"Number of class images to sample: {num_new_images}.") sample_dataset = PromptDataset(args.class_prompt, num_new_images) sample_dataloader = torch.utils.data.DataLoader( sample_dataset, batch_size=args.sample_batch_size, num_workers=1 ) sample_dataloader = accelerator.prepare(sample_dataloader) pipeline.to(accelerator.device) transform_to_pil = transforms.ToPILImage() for example in tqdm( sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process ): bsz = len(example["prompt"]) fake_images = torch.rand((3, args.resolution, args.resolution)) transform_to_pil = transforms.ToPILImage() fake_pil_images = transform_to_pil(fake_images) fake_mask = random_mask((args.resolution, args.resolution), ratio=1, mask_full_image=True) images = pipeline(prompt=example["prompt"], mask_image=fake_mask, image=fake_pil_images).images for i, image in enumerate(images): hash_image = hashlib.sha1(image.tobytes()).hexdigest() image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg" image.save(image_filename) del pipeline if torch.cuda.is_available(): torch.cuda.empty_cache() # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id # Load the tokenizer if args.tokenizer_name: tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name) elif args.pretrained_model_name_or_path: tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer") # Load models and create wrapper for stable diffusion text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder") vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae") unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet") vae.requires_grad_(False) if not args.train_text_encoder: text_encoder.requires_grad_(False) if args.gradient_checkpointing: unet.enable_gradient_checkpointing() if args.train_text_encoder: text_encoder.gradient_checkpointing_enable() if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "To use 
8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." ) optimizer_class = bnb.optim.AdamW8bit else: optimizer_class = torch.optim.AdamW params_to_optimize = ( itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters() ) optimizer = optimizer_class( params_to_optimize, lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") train_dataset = DreamBoothDataset( instance_data_root=args.instance_data_dir, instance_prompt=args.instance_prompt, class_data_root=args.class_data_dir if args.with_prior_preservation else None, class_prompt=args.class_prompt, tokenizer=tokenizer, size=args.resolution, center_crop=args.center_crop, ) def collate_fn(examples): input_ids = [example["instance_prompt_ids"] for example in examples] pixel_values = [example["instance_images"] for example in examples] # Concat class and instance examples for prior preservation. # We do this to avoid doing two forward passes. if args.with_prior_preservation: input_ids += [example["class_prompt_ids"] for example in examples] pixel_values += [example["class_images"] for example in examples] pior_pil = [example["class_PIL_images"] for example in examples] masks = [] masked_images = [] for example in examples: pil_image = example["PIL_images"] # generate a random mask mask = random_mask(pil_image.size, 1, False) # prepare mask and masked image mask, masked_image = prepare_mask_and_masked_image(pil_image, mask) masks.append(mask) masked_images.append(masked_image) if args.with_prior_preservation: for pil_image in pior_pil: # generate a random mask mask = random_mask(pil_image.size, 1, False) # prepare mask and masked image mask, masked_image = prepare_mask_and_masked_image(pil_image, mask) masks.append(mask) masked_images.append(masked_image) pixel_values = torch.stack(pixel_values) pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() input_ids = tokenizer.pad({"input_ids": input_ids}, padding=True, return_tensors="pt").input_ids masks = torch.stack(masks) masked_images = torch.stack(masked_images) batch = {"input_ids": input_ids, "pixel_values": pixel_values, "masks": masks, "masked_images": masked_images} return batch train_dataloader = torch.utils.data.DataLoader( train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn ) # Scheduler and math around the number of training steps. 
overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes, num_training_steps=args.max_train_steps * accelerator.num_processes, ) if args.train_text_encoder: unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, text_encoder, optimizer, train_dataloader, lr_scheduler ) else: unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, optimizer, train_dataloader, lr_scheduler ) accelerator.register_for_checkpointing(lr_scheduler) weight_dtype = torch.float32 if args.mixed_precision == "fp16": weight_dtype = torch.float16 elif args.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Move text_encode and vae to gpu. # For mixed precision training we cast the text_encoder and vae weights to half-precision # as these models are only used for inference, keeping weights in full precision is not required. vae.to(accelerator.device, dtype=weight_dtype) if not args.train_text_encoder: text_encoder.to(accelerator.device, dtype=weight_dtype) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: accelerator.init_trackers("dreambooth", config=vars(args)) # Train! total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num batches each epoch = {len(train_dataloader)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] if len(dirs) > 0 else None if path is None: accelerator.print( f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." 
) args.resume_from_checkpoint = None else: accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(os.path.join(args.output_dir, path)) global_step = int(path.split("-")[1]) resume_global_step = global_step * args.gradient_accumulation_steps first_epoch = global_step // num_update_steps_per_epoch resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps) # Only show the progress bar once on each machine. progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process) progress_bar.set_description("Steps") for epoch in range(first_epoch, args.num_train_epochs): unet.train() for step, batch in enumerate(train_dataloader): # Skip steps until we reach the resumed step if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step: if step % args.gradient_accumulation_steps == 0: progress_bar.update(1) continue with accelerator.accumulate(unet): # Convert images to latent space latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample() latents = latents * vae.config.scaling_factor # Convert masked images to latent space masked_latents = vae.encode( batch["masked_images"].reshape(batch["pixel_values"].shape).to(dtype=weight_dtype) ).latent_dist.sample() masked_latents = masked_latents * vae.config.scaling_factor masks = batch["masks"] # resize the mask to latents shape as we concatenate the mask to the latents mask = torch.stack( [ torch.nn.functional.interpolate(mask, size=(args.resolution // 8, args.resolution // 8)) for mask in masks ] ) mask = mask.reshape(-1, 1, args.resolution // 8, args.resolution // 8) # Sample noise that we'll add to the latents noise = torch.randn_like(latents) bsz = latents.shape[0] # Sample a random timestep for each image timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device) timesteps = timesteps.long() # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) # concatenate the noised latents with the mask and the masked latents latent_model_input = torch.cat([noisy_latents, mask, masked_latents], dim=1) # Get the text embedding for conditioning encoder_hidden_states = text_encoder(batch["input_ids"])[0] # Predict the noise residual noise_pred = unet(latent_model_input, timesteps, encoder_hidden_states).sample # Get the target for loss depending on the prediction type if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(latents, noise, timesteps) else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") if args.with_prior_preservation: # Chunk the noise and noise_pred into two parts and compute the loss on each part separately. noise_pred, noise_pred_prior = torch.chunk(noise_pred, 2, dim=0) target, target_prior = torch.chunk(target, 2, dim=0) # Compute instance loss loss = F.mse_loss(noise_pred.float(), target.float(), reduction="none").mean([1, 2, 3]).mean() # Compute prior loss prior_loss = F.mse_loss(noise_pred_prior.float(), target_prior.float(), reduction="mean") # Add the prior loss to the instance loss. 
loss = loss + args.prior_loss_weight * prior_loss else: loss = F.mse_loss(noise_pred.float(), target.float(), reduction="mean") accelerator.backward(loss) if accelerator.sync_gradients: params_to_clip = ( itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters() ) accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) global_step += 1 if global_step % args.checkpointing_steps == 0: if accelerator.is_main_process: save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) accelerator.log(logs, step=global_step) if global_step >= args.max_train_steps: break accelerator.wait_for_everyone() # Create the pipeline using using the trained modules and save it. if accelerator.is_main_process: pipeline = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, unet=accelerator.unwrap_model(unet), text_encoder=accelerator.unwrap_model(text_encoder), ) pipeline.save_pretrained(args.output_dir) if args.push_to_hub: upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) accelerator.end_training() if __name__ == "__main__": main()
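

# --------------------------------------------------------------------------- #
# Illustrative sketch (not part of the original training flow): how the mask
# helpers defined at the top of this file fit together for a single PIL image.
# `random_mask` draws a random rectangle/ellipse mask and
# `prepare_mask_and_masked_image` turns the (image, mask) pair into the tensors
# that `collate_fn` stacks into the training batch.
# --------------------------------------------------------------------------- #


def example_mask_and_masked_image(pil_image):
    """Return (mask, masked_image) tensors for one RGB PIL image, as used in collate_fn."""
    mask = random_mask(pil_image.size, ratio=1, mask_full_image=False)
    return prepare_mask_and_masked_image(pil_image, mask)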
0
hf_public_repos/diffusers/examples/research_projects
hf_public_repos/diffusers/examples/research_projects/dreambooth_inpaint/train_dreambooth_inpaint_lora.py
import argparse import hashlib import math import os import random from pathlib import Path import numpy as np import torch import torch.nn.functional as F import torch.utils.checkpoint from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import ProjectConfiguration, set_seed from huggingface_hub import create_repo, upload_folder from PIL import Image, ImageDraw from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel from diffusers.loaders import AttnProcsLayers from diffusers.models.attention_processor import LoRAAttnProcessor from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version from diffusers.utils.import_utils import is_xformers_available # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.13.0.dev0") logger = get_logger(__name__) def prepare_mask_and_masked_image(image, mask): image = np.array(image.convert("RGB")) image = image[None].transpose(0, 3, 1, 2) image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 mask = np.array(mask.convert("L")) mask = mask.astype(np.float32) / 255.0 mask = mask[None, None] mask[mask < 0.5] = 0 mask[mask >= 0.5] = 1 mask = torch.from_numpy(mask) masked_image = image * (mask < 0.5) return mask, masked_image # generate random masks def random_mask(im_shape, ratio=1, mask_full_image=False): mask = Image.new("L", im_shape, 0) draw = ImageDraw.Draw(mask) size = (random.randint(0, int(im_shape[0] * ratio)), random.randint(0, int(im_shape[1] * ratio))) # use this to always mask the whole image if mask_full_image: size = (int(im_shape[0] * ratio), int(im_shape[1] * ratio)) limits = (im_shape[0] - size[0] // 2, im_shape[1] - size[1] // 2) center = (random.randint(size[0] // 2, limits[0]), random.randint(size[1] // 2, limits[1])) draw_type = random.randint(0, 1) if draw_type == 0 or mask_full_image: draw.rectangle( (center[0] - size[0] // 2, center[1] - size[1] // 2, center[0] + size[0] // 2, center[1] + size[1] // 2), fill=255, ) else: draw.ellipse( (center[0] - size[0] // 2, center[1] - size[1] // 2, center[0] + size[0] // 2, center[1] + size[1] // 2), fill=255, ) return mask def parse_args(): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--instance_data_dir", type=str, default=None, required=True, help="A folder containing the training data of instance images.", ) parser.add_argument( "--class_data_dir", type=str, default=None, required=False, help="A folder containing the training data of class images.", ) parser.add_argument( "--instance_prompt", type=str, default=None, help="The prompt with identifier specifying the instance", ) parser.add_argument( "--class_prompt", type=str, default=None, help="The prompt to specify images in the same class as provided instance images.", ) parser.add_argument( "--with_prior_preservation", default=False, action="store_true", help="Flag to add prior preservation loss.", ) 
parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.") parser.add_argument( "--num_class_images", type=int, default=100, help=( "Minimal class images for prior preservation loss. If not have enough images, additional images will be" " sampled with class_prompt." ), ) parser.add_argument( "--output_dir", type=str, default="dreambooth-inpaint-model", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--center_crop", default=False, action="store_true", help=( "Whether to center crop the input images to the resolution. If not set, the images will be randomly" " cropped. The images will be resized to the resolution first before cropping." ), ) parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder") parser.add_argument( "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader." ) parser.add_argument( "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images." ) parser.add_argument("--num_train_epochs", type=int, default=1) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--learning_rate", type=float, default=5e-6, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." 
) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--mixed_precision", type=str, default="no", choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final" " checkpoints in case they are better than the last checkpoint and are suitable for resuming training" " using `--resume_from_checkpoint`." ), ) parser.add_argument( "--checkpoints_total_limit", type=int, default=None, help=( "Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`." " See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state" " for more docs" ), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) parser.add_argument( "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." ) args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank if args.instance_data_dir is None: raise ValueError("You must specify a train data directory.") if args.with_prior_preservation: if args.class_data_dir is None: raise ValueError("You must specify a data directory for class images.") if args.class_prompt is None: raise ValueError("You must specify prompt for class images.") return args class DreamBoothDataset(Dataset): """ A dataset to prepare the instance and class images with the prompts for fine-tuning the model. It pre-processes the images and the tokenizes prompts. 
""" def __init__( self, instance_data_root, instance_prompt, tokenizer, class_data_root=None, class_prompt=None, size=512, center_crop=False, ): self.size = size self.center_crop = center_crop self.tokenizer = tokenizer self.instance_data_root = Path(instance_data_root) if not self.instance_data_root.exists(): raise ValueError("Instance images root doesn't exists.") self.instance_images_path = list(Path(instance_data_root).iterdir()) self.num_instance_images = len(self.instance_images_path) self.instance_prompt = instance_prompt self._length = self.num_instance_images if class_data_root is not None: self.class_data_root = Path(class_data_root) self.class_data_root.mkdir(parents=True, exist_ok=True) self.class_images_path = list(self.class_data_root.iterdir()) self.num_class_images = len(self.class_images_path) self._length = max(self.num_class_images, self.num_instance_images) self.class_prompt = class_prompt else: self.class_data_root = None self.image_transforms_resize_and_crop = transforms.Compose( [ transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR), transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size), ] ) self.image_transforms = transforms.Compose( [ transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def __len__(self): return self._length def __getitem__(self, index): example = {} instance_image = Image.open(self.instance_images_path[index % self.num_instance_images]) if not instance_image.mode == "RGB": instance_image = instance_image.convert("RGB") instance_image = self.image_transforms_resize_and_crop(instance_image) example["PIL_images"] = instance_image example["instance_images"] = self.image_transforms(instance_image) example["instance_prompt_ids"] = self.tokenizer( self.instance_prompt, padding="do_not_pad", truncation=True, max_length=self.tokenizer.model_max_length, ).input_ids if self.class_data_root: class_image = Image.open(self.class_images_path[index % self.num_class_images]) if not class_image.mode == "RGB": class_image = class_image.convert("RGB") class_image = self.image_transforms_resize_and_crop(class_image) example["class_images"] = self.image_transforms(class_image) example["class_PIL_images"] = class_image example["class_prompt_ids"] = self.tokenizer( self.class_prompt, padding="do_not_pad", truncation=True, max_length=self.tokenizer.model_max_length, ).input_ids return example class PromptDataset(Dataset): "A simple dataset to prepare the prompts to generate class images on multiple GPUs." def __init__(self, prompt, num_samples): self.prompt = prompt self.num_samples = num_samples def __len__(self): return self.num_samples def __getitem__(self, index): example = {} example["prompt"] = self.prompt example["index"] = index return example def main(): args = parse_args() logging_dir = Path(args.output_dir, args.logging_dir) accelerator_project_config = ProjectConfiguration( total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir ) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with="tensorboard", project_config=accelerator_project_config, ) # Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate # This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models. # TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate. 
if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1: raise ValueError( "Gradient accumulation is not supported when training the text encoder in distributed training. " "Please set gradient_accumulation_steps to 1. This feature will be supported in the future." ) if args.seed is not None: set_seed(args.seed) if args.with_prior_preservation: class_images_dir = Path(args.class_data_dir) if not class_images_dir.exists(): class_images_dir.mkdir(parents=True) cur_class_images = len(list(class_images_dir.iterdir())) if cur_class_images < args.num_class_images: torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32 pipeline = StableDiffusionInpaintPipeline.from_pretrained( args.pretrained_model_name_or_path, torch_dtype=torch_dtype, safety_checker=None ) pipeline.set_progress_bar_config(disable=True) num_new_images = args.num_class_images - cur_class_images logger.info(f"Number of class images to sample: {num_new_images}.") sample_dataset = PromptDataset(args.class_prompt, num_new_images) sample_dataloader = torch.utils.data.DataLoader( sample_dataset, batch_size=args.sample_batch_size, num_workers=1 ) sample_dataloader = accelerator.prepare(sample_dataloader) pipeline.to(accelerator.device) transform_to_pil = transforms.ToPILImage() for example in tqdm( sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process ): bsz = len(example["prompt"]) fake_images = torch.rand((3, args.resolution, args.resolution)) transform_to_pil = transforms.ToPILImage() fake_pil_images = transform_to_pil(fake_images) fake_mask = random_mask((args.resolution, args.resolution), ratio=1, mask_full_image=True) images = pipeline(prompt=example["prompt"], mask_image=fake_mask, image=fake_pil_images).images for i, image in enumerate(images): hash_image = hashlib.sha1(image.tobytes()).hexdigest() image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg" image.save(image_filename) del pipeline if torch.cuda.is_available(): torch.cuda.empty_cache() # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id # Load the tokenizer if args.tokenizer_name: tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name) elif args.pretrained_model_name_or_path: tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer") # Load models and create wrapper for stable diffusion text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder") vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae") unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet") # We only train the additional adapter LoRA layers vae.requires_grad_(False) text_encoder.requires_grad_(False) unet.requires_grad_(False) weight_dtype = torch.float32 if args.mixed_precision == "fp16": weight_dtype = torch.float16 elif args.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Move text_encode and vae to gpu. # For mixed precision training we cast the text_encoder and vae weights to half-precision # as these models are only used for inference, keeping weights in full precision is not required. 
unet.to(accelerator.device, dtype=weight_dtype) vae.to(accelerator.device, dtype=weight_dtype) text_encoder.to(accelerator.device, dtype=weight_dtype) if args.enable_xformers_memory_efficient_attention: if is_xformers_available(): unet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. Make sure it is installed correctly") # now we will add new LoRA weights to the attention layers # It's important to realize here how many attention weights will be added and of which sizes # The sizes of the attention layers consist only of two different variables: # 1) - the "hidden_size", which is increased according to `unet.config.block_out_channels`. # 2) - the "cross attention size", which is set to `unet.config.cross_attention_dim`. # Let's first see how many attention processors we will have to set. # For Stable Diffusion, it should be equal to: # - down blocks (2x attention layers) * (2x transformer layers) * (3x down blocks) = 12 # - mid blocks (2x attention layers) * (1x transformer layers) * (1x mid blocks) = 2 # - up blocks (2x attention layers) * (3x transformer layers) * (3x down blocks) = 18 # => 32 layers # Set correct lora layers lora_attn_procs = {} for name in unet.attn_processors.keys(): cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim if name.startswith("mid_block"): hidden_size = unet.config.block_out_channels[-1] elif name.startswith("up_blocks"): block_id = int(name[len("up_blocks.")]) hidden_size = list(reversed(unet.config.block_out_channels))[block_id] elif name.startswith("down_blocks"): block_id = int(name[len("down_blocks.")]) hidden_size = unet.config.block_out_channels[block_id] lora_attn_procs[name] = LoRAAttnProcessor(hidden_size=hidden_size, cross_attention_dim=cross_attention_dim) unet.set_attn_processor(lora_attn_procs) lora_layers = AttnProcsLayers(unet.attn_processors) accelerator.register_for_checkpointing(lora_layers) if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." ) optimizer_class = bnb.optim.AdamW8bit else: optimizer_class = torch.optim.AdamW optimizer = optimizer_class( lora_layers.parameters(), lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") train_dataset = DreamBoothDataset( instance_data_root=args.instance_data_dir, instance_prompt=args.instance_prompt, class_data_root=args.class_data_dir if args.with_prior_preservation else None, class_prompt=args.class_prompt, tokenizer=tokenizer, size=args.resolution, center_crop=args.center_crop, ) def collate_fn(examples): input_ids = [example["instance_prompt_ids"] for example in examples] pixel_values = [example["instance_images"] for example in examples] # Concat class and instance examples for prior preservation. # We do this to avoid doing two forward passes. 
if args.with_prior_preservation: input_ids += [example["class_prompt_ids"] for example in examples] pixel_values += [example["class_images"] for example in examples] pior_pil = [example["class_PIL_images"] for example in examples] masks = [] masked_images = [] for example in examples: pil_image = example["PIL_images"] # generate a random mask mask = random_mask(pil_image.size, 1, False) # prepare mask and masked image mask, masked_image = prepare_mask_and_masked_image(pil_image, mask) masks.append(mask) masked_images.append(masked_image) if args.with_prior_preservation: for pil_image in pior_pil: # generate a random mask mask = random_mask(pil_image.size, 1, False) # prepare mask and masked image mask, masked_image = prepare_mask_and_masked_image(pil_image, mask) masks.append(mask) masked_images.append(masked_image) pixel_values = torch.stack(pixel_values) pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() input_ids = tokenizer.pad({"input_ids": input_ids}, padding=True, return_tensors="pt").input_ids masks = torch.stack(masks) masked_images = torch.stack(masked_images) batch = {"input_ids": input_ids, "pixel_values": pixel_values, "masks": masks, "masked_images": masked_images} return batch train_dataloader = torch.utils.data.DataLoader( train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn ) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes, num_training_steps=args.max_train_steps * accelerator.num_processes, ) # Prepare everything with our `accelerator`. lora_layers, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( lora_layers, optimizer, train_dataloader, lr_scheduler ) # accelerator.register_for_checkpointing(lr_scheduler) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: accelerator.init_trackers("dreambooth-inpaint-lora", config=vars(args)) # Train! total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num batches each epoch = {len(train_dataloader)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. 
parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] if len(dirs) > 0 else None if path is None: accelerator.print( f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." ) args.resume_from_checkpoint = None else: accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(os.path.join(args.output_dir, path)) global_step = int(path.split("-")[1]) resume_global_step = global_step * args.gradient_accumulation_steps first_epoch = global_step // num_update_steps_per_epoch resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps) # Only show the progress bar once on each machine. progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process) progress_bar.set_description("Steps") for epoch in range(first_epoch, args.num_train_epochs): unet.train() for step, batch in enumerate(train_dataloader): # Skip steps until we reach the resumed step if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step: if step % args.gradient_accumulation_steps == 0: progress_bar.update(1) continue with accelerator.accumulate(unet): # Convert images to latent space latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample() latents = latents * vae.config.scaling_factor # Convert masked images to latent space masked_latents = vae.encode( batch["masked_images"].reshape(batch["pixel_values"].shape).to(dtype=weight_dtype) ).latent_dist.sample() masked_latents = masked_latents * vae.config.scaling_factor masks = batch["masks"] # resize the mask to latents shape as we concatenate the mask to the latents mask = torch.stack( [ torch.nn.functional.interpolate(mask, size=(args.resolution // 8, args.resolution // 8)) for mask in masks ] ).to(dtype=weight_dtype) mask = mask.reshape(-1, 1, args.resolution // 8, args.resolution // 8) # Sample noise that we'll add to the latents noise = torch.randn_like(latents) bsz = latents.shape[0] # Sample a random timestep for each image timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device) timesteps = timesteps.long() # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) # concatenate the noised latents with the mask and the masked latents latent_model_input = torch.cat([noisy_latents, mask, masked_latents], dim=1) # Get the text embedding for conditioning encoder_hidden_states = text_encoder(batch["input_ids"])[0] # Predict the noise residual noise_pred = unet(latent_model_input, timesteps, encoder_hidden_states).sample # Get the target for loss depending on the prediction type if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(latents, noise, timesteps) else: raise ValueError(f"Unknown prediction 
type {noise_scheduler.config.prediction_type}") if args.with_prior_preservation: # Chunk the noise and noise_pred into two parts and compute the loss on each part separately. noise_pred, noise_pred_prior = torch.chunk(noise_pred, 2, dim=0) target, target_prior = torch.chunk(target, 2, dim=0) # Compute instance loss loss = F.mse_loss(noise_pred.float(), target.float(), reduction="none").mean([1, 2, 3]).mean() # Compute prior loss prior_loss = F.mse_loss(noise_pred_prior.float(), target_prior.float(), reduction="mean") # Add the prior loss to the instance loss. loss = loss + args.prior_loss_weight * prior_loss else: loss = F.mse_loss(noise_pred.float(), target.float(), reduction="mean") accelerator.backward(loss) if accelerator.sync_gradients: params_to_clip = lora_layers.parameters() accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) global_step += 1 if global_step % args.checkpointing_steps == 0: if accelerator.is_main_process: save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) accelerator.log(logs, step=global_step) if global_step >= args.max_train_steps: break accelerator.wait_for_everyone() # Save the lora layers if accelerator.is_main_process: unet = unet.to(torch.float32) unet.save_attn_procs(args.output_dir) if args.push_to_hub: upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) accelerator.end_training() if __name__ == "__main__": main()
0
hf_public_repos/diffusers/examples/research_projects
hf_public_repos/diffusers/examples/research_projects/onnxruntime/README.md
## Diffusers examples with ONNXRuntime optimizations

**This research project is not actively maintained by the diffusers team. For any questions or comments, please contact Prathik Rao (prathikr), Sunghoon Choi (hanbitmyths), Ashwini Khade (askhade), or Peng Wang (pengwa) on GitHub.**

This project provides diffusers examples with ONNXRuntime optimizations for training/fine-tuning unconditional image generation, text-to-image, and textual inversion. Please see the individual directories for more details on how to run each task with ONNXRuntime.
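The common pattern in these scripts is to wrap the PyTorch modules with `ORTModule` (and the optimizer with ONNX Runtime's `FP16_Optimizer`) so that the forward and backward passes run through ONNXRuntime. Below is a minimal sketch of that pattern, assuming `onnxruntime-training` is installed; the `torch.nn.Linear` model is only a toy stand-in for the actual UNet/text encoder used in the examples.

```py
# Minimal sketch of the ONNXRuntime training pattern used by these examples.
# Assumes `onnxruntime-training` is installed; the Linear model is a toy stand-in.
import torch
from onnxruntime.training.optim.fp16_optimizer import FP16_Optimizer as ORT_FP16_Optimizer
from onnxruntime.training.ortmodule import ORTModule

model = torch.nn.Linear(16, 16)  # stand-in for the UNet / text encoder
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)

model = ORTModule(model)  # forward/backward now execute through ONNX Runtime
optimizer = ORT_FP16_Optimizer(optimizer)  # ORT wrapper used by the scripts for fp16-friendly updates

x = torch.randn(4, 16)
loss = model(x).pow(2).mean()
loss.backward()
optimizer.step()
optimizer.zero_grad()
```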
0
hf_public_repos/diffusers/examples/research_projects/onnxruntime
hf_public_repos/diffusers/examples/research_projects/onnxruntime/textual_inversion/requirements.txt
accelerate>=0.16.0 torchvision transformers>=4.25.1 ftfy tensorboard modelcards
0
hf_public_repos/diffusers/examples/research_projects/onnxruntime
hf_public_repos/diffusers/examples/research_projects/onnxruntime/textual_inversion/README.md
## Textual Inversion fine-tuning example

[Textual inversion](https://arxiv.org/abs/2208.01618) is a method to personalize text2image models like stable diffusion on your own images using just 3-5 examples.
The `textual_inversion.py` script shows how to implement the training procedure and adapt it for stable diffusion.

## Running on Colab

Colab for training
[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb)

Colab for inference
[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb)

## Running locally with PyTorch
### Installing the dependencies

Before running the scripts, make sure to install the library's training dependencies:

**Important**

To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date, as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:
```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install .
```

Then cd into the example folder and run
```bash
pip install -r requirements.txt
```

And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:

```bash
accelerate config
```

### Cat toy example

You need to accept the model license before downloading or using the weights. In this example we'll use model version `v1-5`, so you'll need to visit [its card](https://huggingface.co/runwayml/stable-diffusion-v1-5), read the license and tick the checkbox if you agree.

You have to be a registered user on the 🤗 Hugging Face Hub, and you'll also need to use an access token for the code to work. For more information on access tokens, please refer to [this section of the documentation](https://huggingface.co/docs/hub/security-tokens).

Run the following command to authenticate your token:

```bash
huggingface-cli login
```

If you have already cloned the repo, then you won't need to go through these steps.

<br>

Now let's get our dataset. For this example we will use some cat images: https://huggingface.co/datasets/diffusers/cat_toy_example.

Let's first download it locally:

```py
from huggingface_hub import snapshot_download

local_dir = "./cat"
snapshot_download("diffusers/cat_toy_example", local_dir=local_dir, repo_type="dataset", ignore_patterns=".gitattributes")
```

This will be our training data.
Now we can launch the training.

## Use ONNXRuntime to accelerate training

In order to leverage ONNXRuntime to accelerate training, please use the `textual_inversion.py` script in this directory.

The command to train on custom data with ONNXRuntime is:

```bash
export MODEL_NAME="runwayml/stable-diffusion-v1-5"
export DATA_DIR="path-to-dir-containing-images"

accelerate launch textual_inversion.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --train_data_dir=$DATA_DIR \
  --learnable_property="object" \
  --placeholder_token="<cat-toy>" --initializer_token="toy" \
  --resolution=512 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=4 \
  --max_train_steps=3000 \
  --learning_rate=5.0e-04 --scale_lr \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --output_dir="textual_inversion_cat"
```

Please contact Prathik Rao (prathikr), Sunghoon Choi (hanbitmyths), Ashwini Khade (askhade), or Peng Wang (pengwa) on GitHub with any questions.
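For a quick check of the learned concept once training has finished, here is a minimal inference sketch. It assumes the full pipeline was saved to the `--output_dir` used above (for example by passing `--save_as_full_pipeline`); the prompt is only an illustration.

```py
import torch

from diffusers import StableDiffusionPipeline

model_id = "textual_inversion_cat"  # the --output_dir used above
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A <cat-toy> backpack"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("cat-backpack.png")
```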
0
hf_public_repos/diffusers/examples/research_projects/onnxruntime
hf_public_repos/diffusers/examples/research_projects/onnxruntime/textual_inversion/textual_inversion.py
#!/usr/bin/env python # coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and import argparse import logging import math import os import random import warnings from pathlib import Path import numpy as np import PIL import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import ProjectConfiguration, set_seed from huggingface_hub import create_repo, upload_folder from onnxruntime.training.optim.fp16_optimizer import FP16_Optimizer as ORT_FP16_Optimizer from onnxruntime.training.ortmodule import ORTModule # TODO: remove and import from diffusers.utils when the new version of diffusers is released from packaging import version from PIL import Image from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import CLIPTextModel, CLIPTokenizer import diffusers from diffusers import ( AutoencoderKL, DDPMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, StableDiffusionPipeline, UNet2DConditionModel, ) from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version, is_wandb_available from diffusers.utils.import_utils import is_xformers_available if is_wandb_available(): import wandb if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"): PIL_INTERPOLATION = { "linear": PIL.Image.Resampling.BILINEAR, "bilinear": PIL.Image.Resampling.BILINEAR, "bicubic": PIL.Image.Resampling.BICUBIC, "lanczos": PIL.Image.Resampling.LANCZOS, "nearest": PIL.Image.Resampling.NEAREST, } else: PIL_INTERPOLATION = { "linear": PIL.Image.LINEAR, "bilinear": PIL.Image.BILINEAR, "bicubic": PIL.Image.BICUBIC, "lanczos": PIL.Image.LANCZOS, "nearest": PIL.Image.NEAREST, } # ------------------------------------------------------------------------------ # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.17.0.dev0") logger = get_logger(__name__) def save_model_card(repo_id: str, images=None, base_model=str, repo_folder=None): img_str = "" for i, image in enumerate(images): image.save(os.path.join(repo_folder, f"image_{i}.png")) img_str += f"![img_{i}](./image_{i}.png)\n" yaml = f""" --- license: creativeml-openrail-m base_model: {base_model} tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - diffusers - textual_inversion inference: true --- """ model_card = f""" # Textual inversion text2image fine-tuning - {repo_id} These are textual inversion adaption weights for {base_model}. You can find some example images in the following. \n {img_str} """ with open(os.path.join(repo_folder, "README.md"), "w") as f: f.write(yaml + model_card) def log_validation(text_encoder, tokenizer, unet, vae, args, accelerator, weight_dtype, epoch): logger.info( f"Running validation... \n Generating {args.num_validation_images} images with prompt:" f" {args.validation_prompt}." 
) # create pipeline (note: unet and vae are loaded again in float32) pipeline = DiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=accelerator.unwrap_model(text_encoder), tokenizer=tokenizer, unet=unet, vae=vae, safety_checker=None, revision=args.revision, torch_dtype=weight_dtype, ) pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config) pipeline = pipeline.to(accelerator.device) pipeline.set_progress_bar_config(disable=True) # run inference generator = None if args.seed is None else torch.Generator(device=accelerator.device).manual_seed(args.seed) images = [] for _ in range(args.num_validation_images): with torch.autocast("cuda"): image = pipeline(args.validation_prompt, num_inference_steps=25, generator=generator).images[0] images.append(image) for tracker in accelerator.trackers: if tracker.name == "tensorboard": np_images = np.stack([np.asarray(img) for img in images]) tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC") if tracker.name == "wandb": tracker.log( { "validation": [ wandb.Image(image, caption=f"{i}: {args.validation_prompt}") for i, image in enumerate(images) ] } ) del pipeline torch.cuda.empty_cache() return images def save_progress(text_encoder, placeholder_token_ids, accelerator, args, save_path): logger.info("Saving embeddings") learned_embeds = ( accelerator.unwrap_model(text_encoder) .get_input_embeddings() .weight[min(placeholder_token_ids) : max(placeholder_token_ids) + 1] ) learned_embeds_dict = {args.placeholder_token: learned_embeds.detach().cpu()} torch.save(learned_embeds_dict, save_path) def parse_args(): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--save_steps", type=int, default=500, help="Save learned_embeds.bin every X updates steps.", ) parser.add_argument( "--save_as_full_pipeline", action="store_true", help="Save the complete stable diffusion pipeline.", ) parser.add_argument( "--num_vectors", type=int, default=1, help="How many textual inversion vectors shall be used to learn the concept.", ) parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--train_data_dir", type=str, default=None, required=True, help="A folder containing the training data." ) parser.add_argument( "--placeholder_token", type=str, default=None, required=True, help="A token to use as a placeholder for the concept.", ) parser.add_argument( "--initializer_token", type=str, default=None, required=True, help="A token to use as initializer word." 
) parser.add_argument("--learnable_property", type=str, default="object", help="Choose between 'object' and 'style'") parser.add_argument("--repeats", type=int, default=100, help="How many times to repeat the training data.") parser.add_argument( "--output_dir", type=str, default="text-inversion-model", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution." ) parser.add_argument( "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." ) parser.add_argument("--num_train_epochs", type=int, default=100) parser.add_argument( "--max_train_steps", type=int, default=5000, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--learning_rate", type=float, default=1e-4, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--dataloader_num_workers", type=int, default=0, help=( "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." ), ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--mixed_precision", type=str, default="no", choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. 
Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." ), ) parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) parser.add_argument( "--validation_prompt", type=str, default=None, help="A prompt that is used during validation to verify that the model is learning.", ) parser.add_argument( "--num_validation_images", type=int, default=4, help="Number of images that should be generated during validation with `validation_prompt`.", ) parser.add_argument( "--validation_steps", type=int, default=100, help=( "Run validation every X steps. Validation consists of running the prompt" " `args.validation_prompt` multiple times: `args.num_validation_images`" " and logging the images." ), ) parser.add_argument( "--validation_epochs", type=int, default=None, help=( "Deprecated in favor of validation_steps. Run validation every X epochs. Validation consists of running the prompt" " `args.validation_prompt` multiple times: `args.num_validation_images`" " and logging the images." ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming" " training using `--resume_from_checkpoint`." ), ) parser.add_argument( "--checkpoints_total_limit", type=int, default=None, help=( "Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`." " See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state" " for more docs" ), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) parser.add_argument( "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." 
) args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank if args.train_data_dir is None: raise ValueError("You must specify a train data directory.") return args imagenet_templates_small = [ "a photo of a {}", "a rendering of a {}", "a cropped photo of the {}", "the photo of a {}", "a photo of a clean {}", "a photo of a dirty {}", "a dark photo of the {}", "a photo of my {}", "a photo of the cool {}", "a close-up photo of a {}", "a bright photo of the {}", "a cropped photo of a {}", "a photo of the {}", "a good photo of the {}", "a photo of one {}", "a close-up photo of the {}", "a rendition of the {}", "a photo of the clean {}", "a rendition of a {}", "a photo of a nice {}", "a good photo of a {}", "a photo of the nice {}", "a photo of the small {}", "a photo of the weird {}", "a photo of the large {}", "a photo of a cool {}", "a photo of a small {}", ] imagenet_style_templates_small = [ "a painting in the style of {}", "a rendering in the style of {}", "a cropped painting in the style of {}", "the painting in the style of {}", "a clean painting in the style of {}", "a dirty painting in the style of {}", "a dark painting in the style of {}", "a picture in the style of {}", "a cool painting in the style of {}", "a close-up painting in the style of {}", "a bright painting in the style of {}", "a cropped painting in the style of {}", "a good painting in the style of {}", "a close-up painting in the style of {}", "a rendition in the style of {}", "a nice painting in the style of {}", "a small painting in the style of {}", "a weird painting in the style of {}", "a large painting in the style of {}", ] class TextualInversionDataset(Dataset): def __init__( self, data_root, tokenizer, learnable_property="object", # [object, style] size=512, repeats=100, interpolation="bicubic", flip_p=0.5, set="train", placeholder_token="*", center_crop=False, ): self.data_root = data_root self.tokenizer = tokenizer self.learnable_property = learnable_property self.size = size self.placeholder_token = placeholder_token self.center_crop = center_crop self.flip_p = flip_p self.image_paths = [os.path.join(self.data_root, file_path) for file_path in os.listdir(self.data_root)] self.num_images = len(self.image_paths) self._length = self.num_images if set == "train": self._length = self.num_images * repeats self.interpolation = { "linear": PIL_INTERPOLATION["linear"], "bilinear": PIL_INTERPOLATION["bilinear"], "bicubic": PIL_INTERPOLATION["bicubic"], "lanczos": PIL_INTERPOLATION["lanczos"], }[interpolation] self.templates = imagenet_style_templates_small if learnable_property == "style" else imagenet_templates_small self.flip_transform = transforms.RandomHorizontalFlip(p=self.flip_p) def __len__(self): return self._length def __getitem__(self, i): example = {} image = Image.open(self.image_paths[i % self.num_images]) if not image.mode == "RGB": image = image.convert("RGB") placeholder_string = self.placeholder_token text = random.choice(self.templates).format(placeholder_string) example["input_ids"] = self.tokenizer( text, padding="max_length", truncation=True, max_length=self.tokenizer.model_max_length, return_tensors="pt", ).input_ids[0] # default to score-sde preprocessing img = np.array(image).astype(np.uint8) if self.center_crop: crop = min(img.shape[0], img.shape[1]) ( h, w, ) = ( img.shape[0], img.shape[1], ) img = img[(h - crop) // 2 : (h + crop) // 2, (w - crop) // 2 : (w + crop) // 2] 
image = Image.fromarray(img) image = image.resize((self.size, self.size), resample=self.interpolation) image = self.flip_transform(image) image = np.array(image).astype(np.uint8) image = (image / 127.5 - 1.0).astype(np.float32) example["pixel_values"] = torch.from_numpy(image).permute(2, 0, 1) return example def main(): args = parse_args() logging_dir = os.path.join(args.output_dir, args.logging_dir) accelerator_project_config = ProjectConfiguration( total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir ) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_config=accelerator_project_config, ) if args.report_to == "wandb": if not is_wandb_available(): raise ImportError("Make sure to install wandb if you want to use it for logging during training.") # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id # Load tokenizer if args.tokenizer_name: tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name) elif args.pretrained_model_name_or_path: tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer") # Load scheduler and models noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") text_encoder = CLIPTextModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision ) vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision) unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision ) # Add the placeholder token in tokenizer placeholder_tokens = [args.placeholder_token] if args.num_vectors < 1: raise ValueError(f"--num_vectors has to be larger or equal to 1, but is {args.num_vectors}") # add dummy tokens for multi-vector additional_tokens = [] for i in range(1, args.num_vectors): additional_tokens.append(f"{args.placeholder_token}_{i}") placeholder_tokens += additional_tokens num_added_tokens = tokenizer.add_tokens(placeholder_tokens) if num_added_tokens != args.num_vectors: raise ValueError( f"The tokenizer already contains the token {args.placeholder_token}. Please pass a different" " `placeholder_token` that is not already in the tokenizer." 
) # Convert the initializer_token, placeholder_token to ids token_ids = tokenizer.encode(args.initializer_token, add_special_tokens=False) # Check if initializer_token is a single token or a sequence of tokens if len(token_ids) > 1: raise ValueError("The initializer token must be a single token.") initializer_token_id = token_ids[0] placeholder_token_ids = tokenizer.convert_tokens_to_ids(placeholder_tokens) # Resize the token embeddings as we are adding new special tokens to the tokenizer text_encoder.resize_token_embeddings(len(tokenizer)) # Initialise the newly added placeholder token with the embeddings of the initializer token token_embeds = text_encoder.get_input_embeddings().weight.data with torch.no_grad(): for token_id in placeholder_token_ids: token_embeds[token_id] = token_embeds[initializer_token_id].clone() # Freeze vae and unet vae.requires_grad_(False) unet.requires_grad_(False) # Freeze all parameters except for the token embeddings in text encoder text_encoder.text_model.encoder.requires_grad_(False) text_encoder.text_model.final_layer_norm.requires_grad_(False) text_encoder.text_model.embeddings.position_embedding.requires_grad_(False) if args.gradient_checkpointing: # Keep unet in train mode if we are using gradient checkpointing to save memory. # The dropout cannot be != 0 so it doesn't matter if we are in eval or train mode. unet.train() text_encoder.gradient_checkpointing_enable() unet.enable_gradient_checkpointing() if args.enable_xformers_memory_efficient_attention: if is_xformers_available(): import xformers xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): logger.warn( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. Make sure it is installed correctly") # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Initialize the optimizer optimizer = torch.optim.AdamW( text_encoder.get_input_embeddings().parameters(), # only optimize the embeddings lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) optimizer = ORT_FP16_Optimizer(optimizer) # Dataset and DataLoaders creation: train_dataset = TextualInversionDataset( data_root=args.train_data_dir, tokenizer=tokenizer, size=args.resolution, placeholder_token=args.placeholder_token, repeats=args.repeats, learnable_property=args.learnable_property, center_crop=args.center_crop, set="train", ) train_dataloader = torch.utils.data.DataLoader( train_dataset, batch_size=args.train_batch_size, shuffle=True, num_workers=args.dataloader_num_workers ) if args.validation_epochs is not None: warnings.warn( f"FutureWarning: You are doing logging with validation_epochs={args.validation_epochs}." 
" Deprecated validation_epochs in favor of `validation_steps`" f"Setting `args.validation_steps` to {args.validation_epochs * len(train_dataset)}", FutureWarning, stacklevel=2, ) args.validation_steps = args.validation_epochs * len(train_dataset) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes, num_training_steps=args.max_train_steps * accelerator.num_processes, ) # Prepare everything with our `accelerator`. text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( text_encoder, optimizer, train_dataloader, lr_scheduler ) text_encoder = ORTModule(text_encoder) unet = ORTModule(unet) vae = ORTModule(vae) # For mixed precision training we cast the unet and vae weights to half-precision # as these models are only used for inference, keeping weights in full precision is not required. weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Move vae and unet to device and cast to weight_dtype unet.to(accelerator.device, dtype=weight_dtype) vae.to(accelerator.device, dtype=weight_dtype) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: accelerator.init_trackers("textual_inversion", config=vars(args)) # Train! total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] if len(dirs) > 0 else None if path is None: accelerator.print( f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." 
) args.resume_from_checkpoint = None else: accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(os.path.join(args.output_dir, path)) global_step = int(path.split("-")[1]) resume_global_step = global_step * args.gradient_accumulation_steps first_epoch = global_step // num_update_steps_per_epoch resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps) # Only show the progress bar once on each machine. progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process) progress_bar.set_description("Steps") # keep original embeddings as reference orig_embeds_params = accelerator.unwrap_model(text_encoder).get_input_embeddings().weight.data.clone() for epoch in range(first_epoch, args.num_train_epochs): text_encoder.train() for step, batch in enumerate(train_dataloader): # Skip steps until we reach the resumed step if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step: if step % args.gradient_accumulation_steps == 0: progress_bar.update(1) continue with accelerator.accumulate(text_encoder): # Convert images to latent space latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample().detach() latents = latents * vae.config.scaling_factor # Sample noise that we'll add to the latents noise = torch.randn_like(latents) bsz = latents.shape[0] # Sample a random timestep for each image timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device) timesteps = timesteps.long() # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) # Get the text embedding for conditioning encoder_hidden_states = text_encoder(batch["input_ids"])[0].to(dtype=weight_dtype) # Predict the noise residual model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample # Get the target for loss depending on the prediction type if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(latents, noise, timesteps) else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Let's make sure we don't update any embedding weights besides the newly added token index_no_updates = torch.ones((len(tokenizer),), dtype=torch.bool) index_no_updates[min(placeholder_token_ids) : max(placeholder_token_ids) + 1] = False with torch.no_grad(): accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[ index_no_updates ] = orig_embeds_params[index_no_updates] # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: images = [] progress_bar.update(1) global_step += 1 if global_step % args.save_steps == 0: save_path = os.path.join(args.output_dir, f"learned_embeds-steps-{global_step}.bin") save_progress(text_encoder, placeholder_token_ids, accelerator, args, save_path) if accelerator.is_main_process: if global_step % args.checkpointing_steps == 0: save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") if args.validation_prompt is not None and global_step % 
args.validation_steps == 0: images = log_validation( text_encoder, tokenizer, unet, vae, args, accelerator, weight_dtype, epoch ) logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) accelerator.log(logs, step=global_step) if global_step >= args.max_train_steps: break # Create the pipeline using the trained modules and save it. accelerator.wait_for_everyone() if accelerator.is_main_process: if args.push_to_hub and not args.save_as_full_pipeline: logger.warn("Enabling full model saving because --push_to_hub=True was specified.") save_full_model = True else: save_full_model = args.save_as_full_pipeline if save_full_model: pipeline = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=accelerator.unwrap_model(text_encoder), vae=vae, unet=unet, tokenizer=tokenizer, ) pipeline.save_pretrained(args.output_dir) # Save the newly trained embeddings save_path = os.path.join(args.output_dir, "learned_embeds.bin") save_progress(text_encoder, placeholder_token_ids, accelerator, args, save_path) if args.push_to_hub: save_model_card( repo_id, images=images, base_model=args.pretrained_model_name_or_path, repo_folder=args.output_dir, ) upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) accelerator.end_training() if __name__ == "__main__": main()
0
hf_public_repos/diffusers/examples/research_projects/onnxruntime
hf_public_repos/diffusers/examples/research_projects/onnxruntime/unconditional_image_generation/requirements.txt
accelerate>=0.16.0 torchvision datasets tensorboard
0