Dataset columns:
text: string, lengths 7 to 328k
id: string, lengths 14 to 166
metadata: dict
__index_level_0__: int64, 0 to 459
const CopyWebpackPlugin = require("copy-webpack-plugin");
const path = require('path');

module.exports = {
  entry: "./bootstrap.js",
  output: {
    path: path.resolve(__dirname, "dist"),
    filename: "bootstrap.js",
  },
  mode: "development",
  plugins: [
    new CopyWebpackPlugin(['index.html'])
  ],
};
tokenizers/tokenizers/examples/unstable_wasm/www/webpack.config.js/0
{ "file_path": "tokenizers/tokenizers/examples/unstable_wasm/www/webpack.config.js", "repo_id": "tokenizers", "token_count": 114 }
239
//! Popular tokenizer models.

pub mod bpe;
pub mod unigram;
pub mod wordlevel;
pub mod wordpiece;

use std::collections::HashMap;
use std::path::{Path, PathBuf};

use serde::{Deserialize, Serialize, Serializer};

use crate::models::bpe::{BpeTrainer, BPE};
use crate::models::unigram::{Unigram, UnigramTrainer};
use crate::models::wordlevel::{WordLevel, WordLevelTrainer};
use crate::models::wordpiece::{WordPiece, WordPieceTrainer};
use crate::{AddedToken, Model, Result, Token, Trainer};

/// Wraps a vocab mapping (ID -> token) to a struct that will be serialized in order
/// of token ID, smallest to largest.
struct OrderedVocabIter<'a> {
    vocab_r: &'a HashMap<u32, String>,
}

impl<'a> OrderedVocabIter<'a> {
    fn new(vocab_r: &'a HashMap<u32, String>) -> Self {
        Self { vocab_r }
    }
}

impl<'a> Serialize for OrderedVocabIter<'a> {
    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // There could be holes so max + 1 is more correct than vocab_r.len()
        let mut holes = vec![];
        let result = if let Some(max) = self.vocab_r.iter().map(|(key, _)| key).max() {
            let iter = (0..*max + 1).filter_map(|i| {
                if let Some(token) = self.vocab_r.get(&i) {
                    Some((token, i))
                } else {
                    holes.push(i);
                    None
                }
            });
            serializer.collect_map(iter)
        } else {
            serializer.collect_map(std::iter::empty::<(&str, u32)>())
        };
        if !holes.is_empty() {
            warn!("The OrderedVocab you are attempting to save contains holes for indices {:?}, your vocabulary could be corrupted !", holes);
            println!("The OrderedVocab you are attempting to save contains holes for indices {:?}, your vocabulary could be corrupted !", holes);
        }
        result
    }
}

#[derive(Deserialize, Serialize, Debug, PartialEq, Clone)]
#[serde(untagged)]
pub enum ModelWrapper {
    BPE(BPE),
    // WordPiece must stay before WordLevel here for deserialization (for retrocompatibility
    // with the versions not including the "type"), since WordLevel is a subset of WordPiece
    WordPiece(WordPiece),
    WordLevel(WordLevel),
    Unigram(Unigram),
}

impl_enum_from!(WordLevel, ModelWrapper, WordLevel);
impl_enum_from!(WordPiece, ModelWrapper, WordPiece);
impl_enum_from!(BPE, ModelWrapper, BPE);
impl_enum_from!(Unigram, ModelWrapper, Unigram);

impl Model for ModelWrapper {
    type Trainer = TrainerWrapper;

    fn tokenize(&self, tokens: &str) -> Result<Vec<Token>> {
        match self {
            Self::WordLevel(t) => t.tokenize(tokens),
            Self::WordPiece(t) => t.tokenize(tokens),
            Self::BPE(t) => t.tokenize(tokens),
            Self::Unigram(t) => t.tokenize(tokens),
        }
    }

    fn token_to_id(&self, token: &str) -> Option<u32> {
        match self {
            Self::WordLevel(t) => t.token_to_id(token),
            Self::WordPiece(t) => t.token_to_id(token),
            Self::BPE(t) => t.token_to_id(token),
            Self::Unigram(t) => t.token_to_id(token),
        }
    }

    fn id_to_token(&self, id: u32) -> Option<String> {
        match self {
            Self::WordLevel(t) => t.id_to_token(id),
            Self::WordPiece(t) => t.id_to_token(id),
            Self::BPE(t) => t.id_to_token(id),
            Self::Unigram(t) => t.id_to_token(id),
        }
    }

    fn get_vocab(&self) -> HashMap<String, u32> {
        match self {
            Self::WordLevel(t) => t.get_vocab(),
            Self::WordPiece(t) => t.get_vocab(),
            Self::BPE(t) => t.get_vocab(),
            Self::Unigram(t) => t.get_vocab(),
        }
    }

    fn get_vocab_size(&self) -> usize {
        match self {
            Self::WordLevel(t) => t.get_vocab_size(),
            Self::WordPiece(t) => t.get_vocab_size(),
            Self::BPE(t) => t.get_vocab_size(),
            Self::Unigram(t) => t.get_vocab_size(),
        }
    }

    fn save(&self, folder: &Path, name: Option<&str>) -> Result<Vec<PathBuf>> {
        match self {
            Self::WordLevel(t) => t.save(folder, name),
            Self::WordPiece(t) => t.save(folder, name),
            Self::BPE(t) => t.save(folder, name),
            Self::Unigram(t) => t.save(folder, name),
        }
    }

    fn get_trainer(&self) -> Self::Trainer {
        match self {
            Self::WordLevel(t) => t.get_trainer().into(),
            Self::WordPiece(t) => t.get_trainer().into(),
            Self::BPE(t) => t.get_trainer().into(),
            Self::Unigram(t) => t.get_trainer().into(),
        }
    }
}

#[derive(Clone, Serialize, Deserialize)]
pub enum TrainerWrapper {
    BpeTrainer(BpeTrainer),
    WordPieceTrainer(WordPieceTrainer),
    WordLevelTrainer(WordLevelTrainer),
    UnigramTrainer(UnigramTrainer),
}

impl Trainer for TrainerWrapper {
    type Model = ModelWrapper;

    fn should_show_progress(&self) -> bool {
        match self {
            Self::BpeTrainer(bpe) => bpe.should_show_progress(),
            Self::WordPieceTrainer(wpt) => wpt.should_show_progress(),
            Self::WordLevelTrainer(wpt) => wpt.should_show_progress(),
            Self::UnigramTrainer(wpt) => wpt.should_show_progress(),
        }
    }

    fn train(&self, model: &mut ModelWrapper) -> Result<Vec<AddedToken>> {
        match self {
            Self::BpeTrainer(t) => match model {
                ModelWrapper::BPE(bpe) => t.train(bpe),
                _ => Err("BpeTrainer can only train a BPE".into()),
            },
            Self::WordPieceTrainer(t) => match model {
                ModelWrapper::WordPiece(wp) => t.train(wp),
                _ => Err("WordPieceTrainer can only train a WordPiece".into()),
            },
            Self::WordLevelTrainer(t) => match model {
                ModelWrapper::WordLevel(wl) => t.train(wl),
                _ => Err("WordLevelTrainer can only train a WordLevel".into()),
            },
            Self::UnigramTrainer(t) => match model {
                ModelWrapper::Unigram(u) => t.train(u),
                _ => Err("UnigramTrainer can only train a Unigram".into()),
            },
        }
    }

    fn feed<I, S, F>(&mut self, iterator: I, process: F) -> Result<()>
    where
        I: Iterator<Item = S> + Send,
        S: AsRef<str> + Send,
        F: Fn(&str) -> Result<Vec<String>> + Sync,
    {
        match self {
            Self::BpeTrainer(bpe) => bpe.feed(iterator, process),
            Self::WordPieceTrainer(wpt) => wpt.feed(iterator, process),
            Self::WordLevelTrainer(wpt) => wpt.feed(iterator, process),
            Self::UnigramTrainer(wpt) => wpt.feed(iterator, process),
        }
    }
}

impl_enum_from!(BpeTrainer, TrainerWrapper, BpeTrainer);
impl_enum_from!(WordPieceTrainer, TrainerWrapper, WordPieceTrainer);
impl_enum_from!(UnigramTrainer, TrainerWrapper, UnigramTrainer);
impl_enum_from!(WordLevelTrainer, TrainerWrapper, WordLevelTrainer);

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn trainer_wrapper_train_model_wrapper() {
        let trainer = TrainerWrapper::BpeTrainer(BpeTrainer::default());
        let mut model = ModelWrapper::Unigram(Unigram::default());

        let result = trainer.train(&mut model);
        assert!(result.is_err());
    }

    #[test]
    fn incomplete_ordered_vocab() {
        let vocab_r: HashMap<u32, String> =
            HashMap::from([(0, "Hi".to_string()), (2, "There".to_string())]);

        let ordered = OrderedVocabIter::new(&vocab_r);

        let serialized = serde_json::to_string(&ordered).unwrap();
        assert_eq!(serialized, "{\"Hi\":0,\"There\":2}");
    }
}
tokenizers/tokenizers/src/models/mod.rs/0
{ "file_path": "tokenizers/tokenizers/src/models/mod.rs", "repo_id": "tokenizers", "token_count": 3660 }
240
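A minimal usage sketch of the ModelWrapper / TrainerWrapper dispatch in the file above. It assumes the crate-root re-exports implied by that file's own imports (tokenizers::{Model, Trainer}, tokenizers::models::*) and a Default impl for BPE in addition to the ones exercised in its tests; treat it as a sketch under those assumptions, not as the library's documented API.

use tokenizers::models::bpe::{BpeTrainer, BPE};
use tokenizers::models::unigram::Unigram;
use tokenizers::models::{ModelWrapper, TrainerWrapper};
use tokenizers::{Model, Trainer};

fn main() {
    // Lift a concrete model into the wrapper through the From impls
    // generated by impl_enum_from! (BPE::default() is assumed here).
    let model: ModelWrapper = BPE::default().into();
    // Every Model call is dispatched to the wrapped variant by the match arms above.
    println!("vocab size: {}", model.get_vocab_size());

    // Trainers only accept their matching model variant; a mismatch is an error,
    // exactly as exercised by the trainer_wrapper_train_model_wrapper test.
    let trainer: TrainerWrapper = BpeTrainer::default().into();
    let mut wrong_model = ModelWrapper::Unigram(Unigram::default());
    assert!(trainer.train(&mut wrong_model).is_err());
}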
use crate::tokenizer::{NormalizedString, Normalizer, Result};
use serde::{Deserialize, Serialize};

#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(tag = "type")]
pub struct Prepend {
    pub prepend: String,
}

impl Prepend {
    pub fn new(prepend: String) -> Self {
        Self { prepend }
    }
}

impl Normalizer for Prepend {
    /// Prepend the configured string to the normalized string, in place
    fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> {
        if !normalized.is_empty() {
            normalized.prepend(&self.prepend);
        }
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_prepend() {
        let original = "Hello";
        let normalized = "▁Hello";
        assert_ne!(original, normalized);
        let mut n = NormalizedString::from(original);
        let prepend = Prepend::new("▁".to_string());
        prepend.normalize(&mut n).unwrap();
        assert_eq!(&n.get(), &normalized);
        assert_eq!(
            n,
            NormalizedString::new(
                original.to_string(),
                normalized.to_string(),
                vec![
                    (0, 1),
                    (0, 1),
                    (0, 1),
                    (0, 1),
                    (1, 2),
                    (2, 3),
                    (3, 4),
                    (4, 5)
                ],
                0
            )
        );
        assert_eq!(
            n.alignments_original(),
            vec![(0, 4), (4, 5), (5, 6), (6, 7), (7, 8)]
        );
    }
}
tokenizers/tokenizers/src/normalizers/prepend.rs/0
{ "file_path": "tokenizers/tokenizers/src/normalizers/prepend.rs", "repo_id": "tokenizers", "token_count": 856 }
241
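A minimal usage sketch of the Prepend normalizer in the file above, assuming the module paths used there (tokenizers::normalizers::prepend::Prepend and tokenizers::tokenizer::{NormalizedString, Normalizer}) are publicly reachable from the crate; the calls mirror what the file's own test does.

use tokenizers::normalizers::prepend::Prepend;
use tokenizers::tokenizer::{NormalizedString, Normalizer};

fn main() {
    let prepend = Prepend::new("▁".to_string());

    // Non-empty input: the configured string is prepended in place.
    let mut n = NormalizedString::from("world");
    prepend.normalize(&mut n).unwrap();
    assert_eq!(n.get(), "▁world");

    // Empty input is left untouched, per the is_empty() guard above.
    let mut empty = NormalizedString::from("");
    prepend.normalize(&mut empty).unwrap();
    assert_eq!(empty.get(), "");
}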
// Generated by modified Perl script at https://github.com/google/sentencepiece/blob/master/data/gen_unicode_scripts_code.pl // Unicode scripts : https://gist.github.com/Narsil/07556f26dc84a6baeff4d499e68d3cd2 // Rust adaptation : https://gist.github.com/Narsil/1df9fbbf5296a8d4d62de55dcb2fe700 #[derive(PartialEq, Debug, Clone, Copy, Eq)] pub enum Script { Any, Adlam, Ahom, AnatolianHieroglyphs, Arabic, Armenian, Avestan, Balinese, Bamum, BassaVah, Batak, Bengali, Bhaiksuki, Bopomofo, Brahmi, Braille, Buginese, Buhid, CanadianAboriginal, Carian, CaucasianAlbanian, Chakma, Cham, Cherokee, Common, Coptic, Cuneiform, Cypriot, Cyrillic, Deseret, Devanagari, Duployan, EgyptianHieroglyphs, Elbasan, Ethiopic, Georgian, Glagolitic, Gothic, Grantha, Greek, Gujarati, Gurmukhi, Han, Hangul, Hanunoo, Hatran, Hebrew, Hiragana, ImperialAramaic, Inherited, InscriptionalPahlavi, InscriptionalParthian, Javanese, Kaithi, Kannada, Katakana, KayahLi, Kharoshthi, Khmer, Khojki, Khudawadi, Lao, Latin, Lepcha, Limbu, LinearA, LinearB, Lisu, Lycian, Lydian, Mahajani, Malayalam, Mandaic, Manichaean, Marchen, MeeteiMayek, MendeKikakui, MeroiticCursive, MeroiticHieroglyphs, Miao, Modi, Mongolian, Mro, Multani, Myanmar, Nabataean, NewTaiLue, Newa, Nko, Ogham, OlChiki, OldHungarian, OldItalic, OldNorthArabian, OldPermic, OldPersian, OldSouthArabian, OldTurkic, Oriya, Osage, Osmanya, PahawhHmong, Palmyrene, PauCinHau, PhagsPa, Phoenician, PsalterPahlavi, Rejang, Runic, Samaritan, Saurashtra, Sharada, Shavian, Siddham, SignWriting, Sinhala, SoraSompeng, Sundanese, SylotiNagri, Syriac, Tagalog, Tagbanwa, TaiLe, TaiTham, TaiViet, Takri, Tamil, Tangut, Telugu, Thaana, Thai, Tibetan, Tifinagh, Tirhuta, Ugaritic, Vai, WarangCiti, Yi, } pub fn get_script(c: char) -> Script { match c as u32 { 0x0000..=0x001F => Script::Common, 0x0020 => Script::Common, 0x0021..=0x0023 => Script::Common, 0x0024 => Script::Common, 0x0025..=0x0027 => Script::Common, 0x0028 => Script::Common, 0x0029 => Script::Common, 0x002A => Script::Common, 0x002B => Script::Common, 0x002C => Script::Common, 0x002D => Script::Common, 0x002E..=0x002F => Script::Common, 0x0030..=0x0039 => Script::Common, 0x003A..=0x003B => Script::Common, 0x003C..=0x003E => Script::Common, 0x003F..=0x0040 => Script::Common, 0x005B => Script::Common, 0x005C => Script::Common, 0x005D => Script::Common, 0x005E => Script::Common, 0x005F => Script::Common, 0x0060 => Script::Common, 0x007B => Script::Common, 0x007C => Script::Common, 0x007D => Script::Common, 0x007E => Script::Common, 0x007F..=0x009F => Script::Common, 0x00A0 => Script::Common, 0x00A1 => Script::Common, 0x00A2..=0x00A5 => Script::Common, 0x00A6 => Script::Common, 0x00A7 => Script::Common, 0x00A8 => Script::Common, 0x00A9 => Script::Common, 0x00AB => Script::Common, 0x00AC => Script::Common, 0x00AD => Script::Common, 0x00AE => Script::Common, 0x00AF => Script::Common, 0x00B0 => Script::Common, 0x00B1 => Script::Common, 0x00B2..=0x00B3 => Script::Common, 0x00B4 => Script::Common, 0x00B5 => Script::Common, 0x00B6..=0x00B7 => Script::Common, 0x00B8 => Script::Common, 0x00B9 => Script::Common, 0x00BB => Script::Common, 0x00BC..=0x00BE => Script::Common, 0x00BF => Script::Common, 0x00D7 => Script::Common, 0x00F7 => Script::Common, 0x02B9..=0x02C1 => Script::Common, 0x02C2..=0x02C5 => Script::Common, 0x02C6..=0x02D1 => Script::Common, 0x02D2..=0x02DF => Script::Common, 0x02E5..=0x02E9 => Script::Common, 0x02EC => Script::Common, 0x02ED => Script::Common, 0x02EE => Script::Common, 0x02EF..=0x02FF => Script::Common, 0x0374 => 
Script::Common, 0x037E => Script::Common, 0x0385 => Script::Common, 0x0387 => Script::Common, 0x0589 => Script::Common, 0x0605 => Script::Common, 0x060C => Script::Common, 0x061B => Script::Common, 0x061C => Script::Common, 0x061F => Script::Common, 0x0640 => Script::Common, 0x06DD => Script::Common, 0x08E2 => Script::Common, 0x0964..=0x0965 => Script::Common, 0x0E3F => Script::Common, 0x0FD5..=0x0FD8 => Script::Common, 0x10FB => Script::Common, 0x16EB..=0x16ED => Script::Common, 0x1735..=0x1736 => Script::Common, 0x1802..=0x1803 => Script::Common, 0x1805 => Script::Common, 0x1CD3 => Script::Common, 0x1CE1 => Script::Common, 0x1CE9..=0x1CEC => Script::Common, 0x1CEE..=0x1CF1 => Script::Common, 0x1CF2..=0x1CF3 => Script::Common, 0x1CF5..=0x1CF6 => Script::Common, 0x2000..=0x200A => Script::Common, 0x200B => Script::Common, 0x200E..=0x200F => Script::Common, 0x2010..=0x2015 => Script::Common, 0x2016..=0x2017 => Script::Common, 0x2018 => Script::Common, 0x2019 => Script::Common, 0x201A => Script::Common, 0x201B..=0x201C => Script::Common, 0x201D => Script::Common, 0x201E => Script::Common, 0x201F => Script::Common, 0x2020..=0x2027 => Script::Common, 0x2028 => Script::Common, 0x2029 => Script::Common, 0x202A..=0x202E => Script::Common, 0x202F => Script::Common, 0x2030..=0x2038 => Script::Common, 0x2039 => Script::Common, 0x203A => Script::Common, 0x203B..=0x203E => Script::Common, 0x203F..=0x2040 => Script::Common, 0x2041..=0x2043 => Script::Common, 0x2044 => Script::Common, 0x2045 => Script::Common, 0x2046 => Script::Common, 0x2047..=0x2051 => Script::Common, 0x2052 => Script::Common, 0x2053 => Script::Common, 0x2054 => Script::Common, 0x2055..=0x205E => Script::Common, 0x205F => Script::Common, 0x2060..=0x2064 => Script::Common, 0x2066..=0x206F => Script::Common, 0x2070 => Script::Common, 0x2074..=0x2079 => Script::Common, 0x207A..=0x207C => Script::Common, 0x207D => Script::Common, 0x207E => Script::Common, 0x2080..=0x2089 => Script::Common, 0x208A..=0x208C => Script::Common, 0x208D => Script::Common, 0x208E => Script::Common, 0x20A0..=0x20BE => Script::Common, 0x2100..=0x2101 => Script::Common, 0x2102 => Script::Common, 0x2103..=0x2106 => Script::Common, 0x2107 => Script::Common, 0x2108..=0x2109 => Script::Common, 0x210A..=0x2113 => Script::Common, 0x2114 => Script::Common, 0x2115 => Script::Common, 0x2116..=0x2117 => Script::Common, 0x2118 => Script::Common, 0x2119..=0x211D => Script::Common, 0x211E..=0x2123 => Script::Common, 0x2124 => Script::Common, 0x2125 => Script::Common, 0x2127 => Script::Common, 0x2128 => Script::Common, 0x2129 => Script::Common, 0x212C..=0x212D => Script::Common, 0x212E => Script::Common, 0x212F..=0x2131 => Script::Common, 0x2133..=0x2134 => Script::Common, 0x2135..=0x2138 => Script::Common, 0x2139 => Script::Common, 0x213A..=0x213B => Script::Common, 0x213C..=0x213F => Script::Common, 0x2140..=0x2144 => Script::Common, 0x2145..=0x2149 => Script::Common, 0x214A => Script::Common, 0x214B => Script::Common, 0x214C..=0x214D => Script::Common, 0x214F => Script::Common, 0x2150..=0x215F => Script::Common, 0x2189 => Script::Common, 0x218A..=0x218B => Script::Common, 0x2190..=0x2194 => Script::Common, 0x2195..=0x2199 => Script::Common, 0x219A..=0x219B => Script::Common, 0x219C..=0x219F => Script::Common, 0x21A0 => Script::Common, 0x21A1..=0x21A2 => Script::Common, 0x21A3 => Script::Common, 0x21A4..=0x21A5 => Script::Common, 0x21A6 => Script::Common, 0x21A7..=0x21AD => Script::Common, 0x21AE => Script::Common, 0x21AF..=0x21CD => Script::Common, 0x21CE..=0x21CF => 
Script::Common, 0x21D0..=0x21D1 => Script::Common, 0x21D2 => Script::Common, 0x21D3 => Script::Common, 0x21D4 => Script::Common, 0x21D5..=0x21F3 => Script::Common, 0x21F4..=0x22FF => Script::Common, 0x2300..=0x2307 => Script::Common, 0x2308 => Script::Common, 0x2309 => Script::Common, 0x230A => Script::Common, 0x230B => Script::Common, 0x230C..=0x231F => Script::Common, 0x2320..=0x2321 => Script::Common, 0x2322..=0x2328 => Script::Common, 0x2329 => Script::Common, 0x232A => Script::Common, 0x232B..=0x237B => Script::Common, 0x237C => Script::Common, 0x237D..=0x239A => Script::Common, 0x239B..=0x23B3 => Script::Common, 0x23B4..=0x23DB => Script::Common, 0x23DC..=0x23E1 => Script::Common, 0x23E2..=0x23FE => Script::Common, 0x2400..=0x2426 => Script::Common, 0x2440..=0x244A => Script::Common, 0x2460..=0x249B => Script::Common, 0x249C..=0x24E9 => Script::Common, 0x24EA..=0x24FF => Script::Common, 0x2500..=0x25B6 => Script::Common, 0x25B7 => Script::Common, 0x25B8..=0x25C0 => Script::Common, 0x25C1 => Script::Common, 0x25C2..=0x25F7 => Script::Common, 0x25F8..=0x25FF => Script::Common, 0x2600..=0x266E => Script::Common, 0x266F => Script::Common, 0x2670..=0x2767 => Script::Common, 0x2768 => Script::Common, 0x2769 => Script::Common, 0x276A => Script::Common, 0x276B => Script::Common, 0x276C => Script::Common, 0x276D => Script::Common, 0x276E => Script::Common, 0x276F => Script::Common, 0x2770 => Script::Common, 0x2771 => Script::Common, 0x2772 => Script::Common, 0x2773 => Script::Common, 0x2774 => Script::Common, 0x2775 => Script::Common, 0x2776..=0x2793 => Script::Common, 0x2794..=0x27BF => Script::Common, 0x27C0..=0x27C4 => Script::Common, 0x27C5 => Script::Common, 0x27C6 => Script::Common, 0x27C7..=0x27E5 => Script::Common, 0x27E6 => Script::Common, 0x27E7 => Script::Common, 0x27E8 => Script::Common, 0x27E9 => Script::Common, 0x27EA => Script::Common, 0x27EB => Script::Common, 0x27EC => Script::Common, 0x27ED => Script::Common, 0x27EE => Script::Common, 0x27EF => Script::Common, 0x27F0..=0x27FF => Script::Common, 0x2900..=0x2982 => Script::Common, 0x2983 => Script::Common, 0x2984 => Script::Common, 0x2985 => Script::Common, 0x2986 => Script::Common, 0x2987 => Script::Common, 0x2988 => Script::Common, 0x2989 => Script::Common, 0x298A => Script::Common, 0x298B => Script::Common, 0x298C => Script::Common, 0x298D => Script::Common, 0x298E => Script::Common, 0x298F => Script::Common, 0x2990 => Script::Common, 0x2991 => Script::Common, 0x2992 => Script::Common, 0x2993 => Script::Common, 0x2994 => Script::Common, 0x2995 => Script::Common, 0x2996 => Script::Common, 0x2997 => Script::Common, 0x2998 => Script::Common, 0x2999..=0x29D7 => Script::Common, 0x29D8 => Script::Common, 0x29D9 => Script::Common, 0x29DA => Script::Common, 0x29DB => Script::Common, 0x29DC..=0x29FB => Script::Common, 0x29FC => Script::Common, 0x29FD => Script::Common, 0x29FE..=0x2AFF => Script::Common, 0x2B00..=0x2B2F => Script::Common, 0x2B30..=0x2B44 => Script::Common, 0x2B45..=0x2B46 => Script::Common, 0x2B47..=0x2B4C => Script::Common, 0x2B4D..=0x2B73 => Script::Common, 0x2B76..=0x2B95 => Script::Common, 0x2B98..=0x2BB9 => Script::Common, 0x2BBD..=0x2BC8 => Script::Common, 0x2BCA..=0x2BD1 => Script::Common, 0x2BEC..=0x2BEF => Script::Common, 0x2E00..=0x2E01 => Script::Common, 0x2E02 => Script::Common, 0x2E03 => Script::Common, 0x2E04 => Script::Common, 0x2E05 => Script::Common, 0x2E06..=0x2E08 => Script::Common, 0x2E09 => Script::Common, 0x2E0A => Script::Common, 0x2E0B => Script::Common, 0x2E0C => Script::Common, 0x2E0D => 
Script::Common, 0x2E0E..=0x2E16 => Script::Common, 0x2E17 => Script::Common, 0x2E18..=0x2E19 => Script::Common, 0x2E1A => Script::Common, 0x2E1B => Script::Common, 0x2E1C => Script::Common, 0x2E1D => Script::Common, 0x2E1E..=0x2E1F => Script::Common, 0x2E20 => Script::Common, 0x2E21 => Script::Common, 0x2E22 => Script::Common, 0x2E23 => Script::Common, 0x2E24 => Script::Common, 0x2E25 => Script::Common, 0x2E26 => Script::Common, 0x2E27 => Script::Common, 0x2E28 => Script::Common, 0x2E29 => Script::Common, 0x2E2A..=0x2E2E => Script::Common, 0x2E2F => Script::Common, 0x2E30..=0x2E39 => Script::Common, 0x2E3A..=0x2E3B => Script::Common, 0x2E3C..=0x2E3F => Script::Common, 0x2E40 => Script::Common, 0x2E41 => Script::Common, 0x2E42 => Script::Common, 0x2E43..=0x2E44 => Script::Common, 0x2FF0..=0x2FFB => Script::Common, 0x3000 => Script::Common, 0x3001..=0x3003 => Script::Common, 0x3004 => Script::Common, 0x3006 => Script::Common, 0x3008 => Script::Common, 0x3009 => Script::Common, 0x300A => Script::Common, 0x300B => Script::Common, 0x300C => Script::Common, 0x300D => Script::Common, 0x300E => Script::Common, 0x300F => Script::Common, 0x3010 => Script::Common, 0x3011 => Script::Common, 0x3012..=0x3013 => Script::Common, 0x3014 => Script::Common, 0x3015 => Script::Common, 0x3016 => Script::Common, 0x3017 => Script::Common, 0x3018 => Script::Common, 0x3019 => Script::Common, 0x301A => Script::Common, 0x301B => Script::Common, 0x301C => Script::Common, 0x301D => Script::Common, 0x301E..=0x301F => Script::Common, 0x3020 => Script::Common, 0x3030 => Script::Common, 0x3031..=0x3035 => Script::Common, 0x3036..=0x3037 => Script::Common, 0x303C => Script::Common, 0x303D => Script::Common, 0x303E..=0x303F => Script::Common, 0x309B..=0x309C => Script::Common, 0x30A0 => Script::Common, 0x30FB => Script::Common, 0x30FC => Script::Common, 0x3190..=0x3191 => Script::Common, 0x3192..=0x3195 => Script::Common, 0x3196..=0x319F => Script::Common, 0x31C0..=0x31E3 => Script::Common, 0x3220..=0x3229 => Script::Common, 0x322A..=0x3247 => Script::Common, 0x3248..=0x324F => Script::Common, 0x3250 => Script::Common, 0x3251..=0x325F => Script::Common, 0x327F => Script::Common, 0x3280..=0x3289 => Script::Common, 0x328A..=0x32B0 => Script::Common, 0x32B1..=0x32BF => Script::Common, 0x32C0..=0x32CF => Script::Common, 0x3358..=0x33FF => Script::Common, 0x4DC0..=0x4DFF => Script::Common, 0xA700..=0xA716 => Script::Common, 0xA717..=0xA71F => Script::Common, 0xA720..=0xA721 => Script::Common, 0xA788 => Script::Common, 0xA789..=0xA78A => Script::Common, 0xA830..=0xA835 => Script::Common, 0xA836..=0xA837 => Script::Common, 0xA838 => Script::Common, 0xA839 => Script::Common, 0xA92E => Script::Common, 0xA9CF => Script::Common, 0xAB5B => Script::Common, 0xFD3E => Script::Common, 0xFD3F => Script::Common, 0xFE10..=0xFE16 => Script::Common, 0xFE17 => Script::Common, 0xFE18 => Script::Common, 0xFE19 => Script::Common, 0xFE30 => Script::Common, 0xFE31..=0xFE32 => Script::Common, 0xFE33..=0xFE34 => Script::Common, 0xFE35 => Script::Common, 0xFE36 => Script::Common, 0xFE37 => Script::Common, 0xFE38 => Script::Common, 0xFE39 => Script::Common, 0xFE3A => Script::Common, 0xFE3B => Script::Common, 0xFE3C => Script::Common, 0xFE3D => Script::Common, 0xFE3E => Script::Common, 0xFE3F => Script::Common, 0xFE40 => Script::Common, 0xFE41 => Script::Common, 0xFE42 => Script::Common, 0xFE43 => Script::Common, 0xFE44 => Script::Common, 0xFE45..=0xFE46 => Script::Common, 0xFE47 => Script::Common, 0xFE48 => Script::Common, 0xFE49..=0xFE4C => 
Script::Common, 0xFE4D..=0xFE4F => Script::Common, 0xFE50..=0xFE52 => Script::Common, 0xFE54..=0xFE57 => Script::Common, 0xFE58 => Script::Common, 0xFE59 => Script::Common, 0xFE5A => Script::Common, 0xFE5B => Script::Common, 0xFE5C => Script::Common, 0xFE5D => Script::Common, 0xFE5E => Script::Common, 0xFE5F..=0xFE61 => Script::Common, 0xFE62 => Script::Common, 0xFE63 => Script::Common, 0xFE64..=0xFE66 => Script::Common, 0xFE68 => Script::Common, 0xFE69 => Script::Common, 0xFE6A..=0xFE6B => Script::Common, 0xFEFF => Script::Common, 0xFF01..=0xFF03 => Script::Common, 0xFF04 => Script::Common, 0xFF05..=0xFF07 => Script::Common, 0xFF08 => Script::Common, 0xFF09 => Script::Common, 0xFF0A => Script::Common, 0xFF0B => Script::Common, 0xFF0C => Script::Common, 0xFF0D => Script::Common, 0xFF0E..=0xFF0F => Script::Common, 0xFF10..=0xFF19 => Script::Common, 0xFF1A..=0xFF1B => Script::Common, 0xFF1C..=0xFF1E => Script::Common, 0xFF1F..=0xFF20 => Script::Common, 0xFF3B => Script::Common, 0xFF3C => Script::Common, 0xFF3D => Script::Common, 0xFF3E => Script::Common, 0xFF3F => Script::Common, 0xFF40 => Script::Common, 0xFF5B => Script::Common, 0xFF5C => Script::Common, 0xFF5D => Script::Common, 0xFF5E => Script::Common, 0xFF5F => Script::Common, 0xFF60 => Script::Common, 0xFF61 => Script::Common, 0xFF62 => Script::Common, 0xFF63 => Script::Common, 0xFF64..=0xFF65 => Script::Common, 0xFF70 => Script::Common, 0xFF9E..=0xFF9F => Script::Common, 0xFFE0..=0xFFE1 => Script::Common, 0xFFE2 => Script::Common, 0xFFE3 => Script::Common, 0xFFE4 => Script::Common, 0xFFE5..=0xFFE6 => Script::Common, 0xFFE8 => Script::Common, 0xFFE9..=0xFFEC => Script::Common, 0xFFED..=0xFFEE => Script::Common, 0xFFF9..=0xFFFB => Script::Common, 0xFFFC..=0xFFFD => Script::Common, 0x10100..=0x10102 => Script::Common, 0x10107..=0x10133 => Script::Common, 0x10137..=0x1013F => Script::Common, 0x10190..=0x1019B => Script::Common, 0x101D0..=0x101FC => Script::Common, 0x102E1..=0x102FB => Script::Common, 0x1BCA0..=0x1BCA3 => Script::Common, 0x1D000..=0x1D0F5 => Script::Common, 0x1D100..=0x1D126 => Script::Common, 0x1D129..=0x1D164 => Script::Common, 0x1D165..=0x1D166 => Script::Common, 0x1D16A..=0x1D16C => Script::Common, 0x1D16D..=0x1D172 => Script::Common, 0x1D173..=0x1D17A => Script::Common, 0x1D183..=0x1D184 => Script::Common, 0x1D18C..=0x1D1A9 => Script::Common, 0x1D1AE..=0x1D1E8 => Script::Common, 0x1D300..=0x1D356 => Script::Common, 0x1D360..=0x1D371 => Script::Common, 0x1D400..=0x1D454 => Script::Common, 0x1D456..=0x1D49C => Script::Common, 0x1D49E..=0x1D49F => Script::Common, 0x1D4A2 => Script::Common, 0x1D4A5..=0x1D4A6 => Script::Common, 0x1D4A9..=0x1D4AC => Script::Common, 0x1D4AE..=0x1D4B9 => Script::Common, 0x1D4BB => Script::Common, 0x1D4BD..=0x1D4C3 => Script::Common, 0x1D4C5..=0x1D505 => Script::Common, 0x1D507..=0x1D50A => Script::Common, 0x1D50D..=0x1D514 => Script::Common, 0x1D516..=0x1D51C => Script::Common, 0x1D51E..=0x1D539 => Script::Common, 0x1D53B..=0x1D53E => Script::Common, 0x1D540..=0x1D544 => Script::Common, 0x1D546 => Script::Common, 0x1D54A..=0x1D550 => Script::Common, 0x1D552..=0x1D6A5 => Script::Common, 0x1D6A8..=0x1D6C0 => Script::Common, 0x1D6C1 => Script::Common, 0x1D6C2..=0x1D6DA => Script::Common, 0x1D6DB => Script::Common, 0x1D6DC..=0x1D6FA => Script::Common, 0x1D6FB => Script::Common, 0x1D6FC..=0x1D714 => Script::Common, 0x1D715 => Script::Common, 0x1D716..=0x1D734 => Script::Common, 0x1D735 => Script::Common, 0x1D736..=0x1D74E => Script::Common, 0x1D74F => Script::Common, 0x1D750..=0x1D76E => 
Script::Common, 0x1D76F => Script::Common, 0x1D770..=0x1D788 => Script::Common, 0x1D789 => Script::Common, 0x1D78A..=0x1D7A8 => Script::Common, 0x1D7A9 => Script::Common, 0x1D7AA..=0x1D7C2 => Script::Common, 0x1D7C3 => Script::Common, 0x1D7C4..=0x1D7CB => Script::Common, 0x1D7CE..=0x1D7FF => Script::Common, 0x1F000..=0x1F02B => Script::Common, 0x1F030..=0x1F093 => Script::Common, 0x1F0A0..=0x1F0AE => Script::Common, 0x1F0B1..=0x1F0BF => Script::Common, 0x1F0C1..=0x1F0CF => Script::Common, 0x1F0D1..=0x1F0F5 => Script::Common, 0x1F100..=0x1F10C => Script::Common, 0x1F110..=0x1F12E => Script::Common, 0x1F130..=0x1F16B => Script::Common, 0x1F170..=0x1F1AC => Script::Common, 0x1F1E6..=0x1F1FF => Script::Common, 0x1F201..=0x1F202 => Script::Common, 0x1F210..=0x1F23B => Script::Common, 0x1F240..=0x1F248 => Script::Common, 0x1F250..=0x1F251 => Script::Common, 0x1F300..=0x1F3FA => Script::Common, 0x1F3FB..=0x1F3FF => Script::Common, 0x1F400..=0x1F6D2 => Script::Common, 0x1F6E0..=0x1F6EC => Script::Common, 0x1F6F0..=0x1F6F6 => Script::Common, 0x1F700..=0x1F773 => Script::Common, 0x1F780..=0x1F7D4 => Script::Common, 0x1F800..=0x1F80B => Script::Common, 0x1F810..=0x1F847 => Script::Common, 0x1F850..=0x1F859 => Script::Common, 0x1F860..=0x1F887 => Script::Common, 0x1F890..=0x1F8AD => Script::Common, 0x1F910..=0x1F91E => Script::Common, 0x1F920..=0x1F927 => Script::Common, 0x1F930 => Script::Common, 0x1F933..=0x1F93E => Script::Common, 0x1F940..=0x1F94B => Script::Common, 0x1F950..=0x1F95E => Script::Common, 0x1F980..=0x1F991 => Script::Common, 0x1F9C0 => Script::Common, 0xE0001 => Script::Common, 0xE0020..=0xE007F => Script::Common, 0x0041..=0x005A => Script::Latin, 0x0061..=0x007A => Script::Latin, 0x00AA => Script::Latin, 0x00BA => Script::Latin, 0x00C0..=0x00D6 => Script::Latin, 0x00D8..=0x00F6 => Script::Latin, 0x00F8..=0x01BA => Script::Latin, 0x01BB => Script::Latin, 0x01BC..=0x01BF => Script::Latin, 0x01C0..=0x01C3 => Script::Latin, 0x01C4..=0x0293 => Script::Latin, 0x0294 => Script::Latin, 0x0295..=0x02AF => Script::Latin, 0x02B0..=0x02B8 => Script::Latin, 0x02E0..=0x02E4 => Script::Latin, 0x1D00..=0x1D25 => Script::Latin, 0x1D2C..=0x1D5C => Script::Latin, 0x1D62..=0x1D65 => Script::Latin, 0x1D6B..=0x1D77 => Script::Latin, 0x1D79..=0x1D9A => Script::Latin, 0x1D9B..=0x1DBE => Script::Latin, 0x1E00..=0x1EFF => Script::Latin, 0x2071 => Script::Latin, 0x207F => Script::Latin, 0x2090..=0x209C => Script::Latin, 0x212A..=0x212B => Script::Latin, 0x2132 => Script::Latin, 0x214E => Script::Latin, 0x2160..=0x2182 => Script::Latin, 0x2183..=0x2184 => Script::Latin, 0x2185..=0x2188 => Script::Latin, 0x2C60..=0x2C7B => Script::Latin, 0x2C7C..=0x2C7D => Script::Latin, 0x2C7E..=0x2C7F => Script::Latin, 0xA722..=0xA76F => Script::Latin, 0xA770 => Script::Latin, 0xA771..=0xA787 => Script::Latin, 0xA78B..=0xA78E => Script::Latin, 0xA78F => Script::Latin, 0xA790..=0xA7AE => Script::Latin, 0xA7B0..=0xA7B7 => Script::Latin, 0xA7F7 => Script::Latin, 0xA7F8..=0xA7F9 => Script::Latin, 0xA7FA => Script::Latin, 0xA7FB..=0xA7FF => Script::Latin, 0xAB30..=0xAB5A => Script::Latin, 0xAB5C..=0xAB5F => Script::Latin, 0xAB60..=0xAB64 => Script::Latin, 0xFB00..=0xFB06 => Script::Latin, 0xFF21..=0xFF3A => Script::Latin, 0xFF41..=0xFF5A => Script::Latin, 0x0370..=0x0373 => Script::Greek, 0x0375 => Script::Greek, 0x0376..=0x0377 => Script::Greek, 0x037A => Script::Greek, 0x037B..=0x037D => Script::Greek, 0x037F => Script::Greek, 0x0384 => Script::Greek, 0x0386 => Script::Greek, 0x0388..=0x038A => Script::Greek, 0x038C => 
Script::Greek, 0x038E..=0x03A1 => Script::Greek, 0x03A3..=0x03E1 => Script::Greek, 0x03F0..=0x03F5 => Script::Greek, 0x03F6 => Script::Greek, 0x03F7..=0x03FF => Script::Greek, 0x1D26..=0x1D2A => Script::Greek, 0x1D5D..=0x1D61 => Script::Greek, 0x1D66..=0x1D6A => Script::Greek, 0x1DBF => Script::Greek, 0x1F00..=0x1F15 => Script::Greek, 0x1F18..=0x1F1D => Script::Greek, 0x1F20..=0x1F45 => Script::Greek, 0x1F48..=0x1F4D => Script::Greek, 0x1F50..=0x1F57 => Script::Greek, 0x1F59 => Script::Greek, 0x1F5B => Script::Greek, 0x1F5D => Script::Greek, 0x1F5F..=0x1F7D => Script::Greek, 0x1F80..=0x1FB4 => Script::Greek, 0x1FB6..=0x1FBC => Script::Greek, 0x1FBD => Script::Greek, 0x1FBE => Script::Greek, 0x1FBF..=0x1FC1 => Script::Greek, 0x1FC2..=0x1FC4 => Script::Greek, 0x1FC6..=0x1FCC => Script::Greek, 0x1FCD..=0x1FCF => Script::Greek, 0x1FD0..=0x1FD3 => Script::Greek, 0x1FD6..=0x1FDB => Script::Greek, 0x1FDD..=0x1FDF => Script::Greek, 0x1FE0..=0x1FEC => Script::Greek, 0x1FED..=0x1FEF => Script::Greek, 0x1FF2..=0x1FF4 => Script::Greek, 0x1FF6..=0x1FFC => Script::Greek, 0x1FFD..=0x1FFE => Script::Greek, 0x2126 => Script::Greek, 0xAB65 => Script::Greek, 0x10140..=0x10174 => Script::Greek, 0x10175..=0x10178 => Script::Greek, 0x10179..=0x10189 => Script::Greek, 0x1018A..=0x1018B => Script::Greek, 0x1018C..=0x1018E => Script::Greek, 0x101A0 => Script::Greek, 0x1D200..=0x1D241 => Script::Greek, 0x1D242..=0x1D244 => Script::Greek, 0x1D245 => Script::Greek, 0x0400..=0x0481 => Script::Cyrillic, 0x0482 => Script::Cyrillic, 0x0483..=0x0484 => Script::Cyrillic, 0x0487 => Script::Cyrillic, 0x0488..=0x0489 => Script::Cyrillic, 0x048A..=0x052F => Script::Cyrillic, 0x1C80..=0x1C88 => Script::Cyrillic, 0x1D2B => Script::Cyrillic, 0x1D78 => Script::Cyrillic, 0x2DE0..=0x2DFF => Script::Cyrillic, 0xA640..=0xA66D => Script::Cyrillic, 0xA66E => Script::Cyrillic, 0xA66F => Script::Cyrillic, 0xA670..=0xA672 => Script::Cyrillic, 0xA673 => Script::Cyrillic, 0xA674..=0xA67D => Script::Cyrillic, 0xA67E => Script::Cyrillic, 0xA67F => Script::Cyrillic, 0xA680..=0xA69B => Script::Cyrillic, 0xA69C..=0xA69D => Script::Cyrillic, 0xA69E..=0xA69F => Script::Cyrillic, 0xFE2E..=0xFE2F => Script::Cyrillic, 0x0531..=0x0556 => Script::Armenian, 0x0559 => Script::Armenian, 0x055A..=0x055F => Script::Armenian, 0x0561..=0x0587 => Script::Armenian, 0x058A => Script::Armenian, 0x058D..=0x058E => Script::Armenian, 0x058F => Script::Armenian, 0xFB13..=0xFB17 => Script::Armenian, 0x0591..=0x05BD => Script::Hebrew, 0x05BE => Script::Hebrew, 0x05BF => Script::Hebrew, 0x05C0 => Script::Hebrew, 0x05C1..=0x05C2 => Script::Hebrew, 0x05C3 => Script::Hebrew, 0x05C4..=0x05C5 => Script::Hebrew, 0x05C6 => Script::Hebrew, 0x05C7 => Script::Hebrew, 0x05D0..=0x05EA => Script::Hebrew, 0x05F0..=0x05F2 => Script::Hebrew, 0x05F3..=0x05F4 => Script::Hebrew, 0xFB1D => Script::Hebrew, 0xFB1E => Script::Hebrew, 0xFB1F..=0xFB28 => Script::Hebrew, 0xFB29 => Script::Hebrew, 0xFB2A..=0xFB36 => Script::Hebrew, 0xFB38..=0xFB3C => Script::Hebrew, 0xFB3E => Script::Hebrew, 0xFB40..=0xFB41 => Script::Hebrew, 0xFB43..=0xFB44 => Script::Hebrew, 0xFB46..=0xFB4F => Script::Hebrew, 0x0600..=0x0604 => Script::Arabic, 0x0606..=0x0608 => Script::Arabic, 0x0609..=0x060A => Script::Arabic, 0x060B => Script::Arabic, 0x060D => Script::Arabic, 0x060E..=0x060F => Script::Arabic, 0x0610..=0x061A => Script::Arabic, 0x061E => Script::Arabic, 0x0620..=0x063F => Script::Arabic, 0x0641..=0x064A => Script::Arabic, 0x0656..=0x065F => Script::Arabic, 0x0660..=0x0669 => Script::Arabic, 0x066A..=0x066D 
=> Script::Arabic, 0x066E..=0x066F => Script::Arabic, 0x0671..=0x06D3 => Script::Arabic, 0x06D4 => Script::Arabic, 0x06D5 => Script::Arabic, 0x06D6..=0x06DC => Script::Arabic, 0x06DE => Script::Arabic, 0x06DF..=0x06E4 => Script::Arabic, 0x06E5..=0x06E6 => Script::Arabic, 0x06E7..=0x06E8 => Script::Arabic, 0x06E9 => Script::Arabic, 0x06EA..=0x06ED => Script::Arabic, 0x06EE..=0x06EF => Script::Arabic, 0x06F0..=0x06F9 => Script::Arabic, 0x06FA..=0x06FC => Script::Arabic, 0x06FD..=0x06FE => Script::Arabic, 0x06FF => Script::Arabic, 0x0750..=0x077F => Script::Arabic, 0x08A0..=0x08B4 => Script::Arabic, 0x08B6..=0x08BD => Script::Arabic, 0x08D4..=0x08E1 => Script::Arabic, 0x08E3..=0x08FF => Script::Arabic, 0xFB50..=0xFBB1 => Script::Arabic, 0xFBB2..=0xFBC1 => Script::Arabic, 0xFBD3..=0xFD3D => Script::Arabic, 0xFD50..=0xFD8F => Script::Arabic, 0xFD92..=0xFDC7 => Script::Arabic, 0xFDF0..=0xFDFB => Script::Arabic, 0xFDFC => Script::Arabic, 0xFDFD => Script::Arabic, 0xFE70..=0xFE74 => Script::Arabic, 0xFE76..=0xFEFC => Script::Arabic, 0x10E60..=0x10E7E => Script::Arabic, 0x1EE00..=0x1EE03 => Script::Arabic, 0x1EE05..=0x1EE1F => Script::Arabic, 0x1EE21..=0x1EE22 => Script::Arabic, 0x1EE24 => Script::Arabic, 0x1EE27 => Script::Arabic, 0x1EE29..=0x1EE32 => Script::Arabic, 0x1EE34..=0x1EE37 => Script::Arabic, 0x1EE39 => Script::Arabic, 0x1EE3B => Script::Arabic, 0x1EE42 => Script::Arabic, 0x1EE47 => Script::Arabic, 0x1EE49 => Script::Arabic, 0x1EE4B => Script::Arabic, 0x1EE4D..=0x1EE4F => Script::Arabic, 0x1EE51..=0x1EE52 => Script::Arabic, 0x1EE54 => Script::Arabic, 0x1EE57 => Script::Arabic, 0x1EE59 => Script::Arabic, 0x1EE5B => Script::Arabic, 0x1EE5D => Script::Arabic, 0x1EE5F => Script::Arabic, 0x1EE61..=0x1EE62 => Script::Arabic, 0x1EE64 => Script::Arabic, 0x1EE67..=0x1EE6A => Script::Arabic, 0x1EE6C..=0x1EE72 => Script::Arabic, 0x1EE74..=0x1EE77 => Script::Arabic, 0x1EE79..=0x1EE7C => Script::Arabic, 0x1EE7E => Script::Arabic, 0x1EE80..=0x1EE89 => Script::Arabic, 0x1EE8B..=0x1EE9B => Script::Arabic, 0x1EEA1..=0x1EEA3 => Script::Arabic, 0x1EEA5..=0x1EEA9 => Script::Arabic, 0x1EEAB..=0x1EEBB => Script::Arabic, 0x1EEF0..=0x1EEF1 => Script::Arabic, 0x0700..=0x070D => Script::Syriac, 0x070F => Script::Syriac, 0x0710 => Script::Syriac, 0x0711 => Script::Syriac, 0x0712..=0x072F => Script::Syriac, 0x0730..=0x074A => Script::Syriac, 0x074D..=0x074F => Script::Syriac, 0x0780..=0x07A5 => Script::Thaana, 0x07A6..=0x07B0 => Script::Thaana, 0x07B1 => Script::Thaana, 0x0900..=0x0902 => Script::Devanagari, 0x0903 => Script::Devanagari, 0x0904..=0x0939 => Script::Devanagari, 0x093A => Script::Devanagari, 0x093B => Script::Devanagari, 0x093C => Script::Devanagari, 0x093D => Script::Devanagari, 0x093E..=0x0940 => Script::Devanagari, 0x0941..=0x0948 => Script::Devanagari, 0x0949..=0x094C => Script::Devanagari, 0x094D => Script::Devanagari, 0x094E..=0x094F => Script::Devanagari, 0x0950 => Script::Devanagari, 0x0953..=0x0957 => Script::Devanagari, 0x0958..=0x0961 => Script::Devanagari, 0x0962..=0x0963 => Script::Devanagari, 0x0966..=0x096F => Script::Devanagari, 0x0970 => Script::Devanagari, 0x0971 => Script::Devanagari, 0x0972..=0x097F => Script::Devanagari, 0xA8E0..=0xA8F1 => Script::Devanagari, 0xA8F2..=0xA8F7 => Script::Devanagari, 0xA8F8..=0xA8FA => Script::Devanagari, 0xA8FB => Script::Devanagari, 0xA8FC => Script::Devanagari, 0xA8FD => Script::Devanagari, 0x0980 => Script::Bengali, 0x0981 => Script::Bengali, 0x0982..=0x0983 => Script::Bengali, 0x0985..=0x098C => Script::Bengali, 0x098F..=0x0990 => 
Script::Bengali, 0x0993..=0x09A8 => Script::Bengali, 0x09AA..=0x09B0 => Script::Bengali, 0x09B2 => Script::Bengali, 0x09B6..=0x09B9 => Script::Bengali, 0x09BC => Script::Bengali, 0x09BD => Script::Bengali, 0x09BE..=0x09C0 => Script::Bengali, 0x09C1..=0x09C4 => Script::Bengali, 0x09C7..=0x09C8 => Script::Bengali, 0x09CB..=0x09CC => Script::Bengali, 0x09CD => Script::Bengali, 0x09CE => Script::Bengali, 0x09D7 => Script::Bengali, 0x09DC..=0x09DD => Script::Bengali, 0x09DF..=0x09E1 => Script::Bengali, 0x09E2..=0x09E3 => Script::Bengali, 0x09E6..=0x09EF => Script::Bengali, 0x09F0..=0x09F1 => Script::Bengali, 0x09F2..=0x09F3 => Script::Bengali, 0x09F4..=0x09F9 => Script::Bengali, 0x09FA => Script::Bengali, 0x09FB => Script::Bengali, 0x0A01..=0x0A02 => Script::Gurmukhi, 0x0A03 => Script::Gurmukhi, 0x0A05..=0x0A0A => Script::Gurmukhi, 0x0A0F..=0x0A10 => Script::Gurmukhi, 0x0A13..=0x0A28 => Script::Gurmukhi, 0x0A2A..=0x0A30 => Script::Gurmukhi, 0x0A32..=0x0A33 => Script::Gurmukhi, 0x0A35..=0x0A36 => Script::Gurmukhi, 0x0A38..=0x0A39 => Script::Gurmukhi, 0x0A3C => Script::Gurmukhi, 0x0A3E..=0x0A40 => Script::Gurmukhi, 0x0A41..=0x0A42 => Script::Gurmukhi, 0x0A47..=0x0A48 => Script::Gurmukhi, 0x0A4B..=0x0A4D => Script::Gurmukhi, 0x0A51 => Script::Gurmukhi, 0x0A59..=0x0A5C => Script::Gurmukhi, 0x0A5E => Script::Gurmukhi, 0x0A66..=0x0A6F => Script::Gurmukhi, 0x0A70..=0x0A71 => Script::Gurmukhi, 0x0A72..=0x0A74 => Script::Gurmukhi, 0x0A75 => Script::Gurmukhi, 0x0A81..=0x0A82 => Script::Gujarati, 0x0A83 => Script::Gujarati, 0x0A85..=0x0A8D => Script::Gujarati, 0x0A8F..=0x0A91 => Script::Gujarati, 0x0A93..=0x0AA8 => Script::Gujarati, 0x0AAA..=0x0AB0 => Script::Gujarati, 0x0AB2..=0x0AB3 => Script::Gujarati, 0x0AB5..=0x0AB9 => Script::Gujarati, 0x0ABC => Script::Gujarati, 0x0ABD => Script::Gujarati, 0x0ABE..=0x0AC0 => Script::Gujarati, 0x0AC1..=0x0AC5 => Script::Gujarati, 0x0AC7..=0x0AC8 => Script::Gujarati, 0x0AC9 => Script::Gujarati, 0x0ACB..=0x0ACC => Script::Gujarati, 0x0ACD => Script::Gujarati, 0x0AD0 => Script::Gujarati, 0x0AE0..=0x0AE1 => Script::Gujarati, 0x0AE2..=0x0AE3 => Script::Gujarati, 0x0AE6..=0x0AEF => Script::Gujarati, 0x0AF0 => Script::Gujarati, 0x0AF1 => Script::Gujarati, 0x0AF9 => Script::Gujarati, 0x0B01 => Script::Oriya, 0x0B02..=0x0B03 => Script::Oriya, 0x0B05..=0x0B0C => Script::Oriya, 0x0B0F..=0x0B10 => Script::Oriya, 0x0B13..=0x0B28 => Script::Oriya, 0x0B2A..=0x0B30 => Script::Oriya, 0x0B32..=0x0B33 => Script::Oriya, 0x0B35..=0x0B39 => Script::Oriya, 0x0B3C => Script::Oriya, 0x0B3D => Script::Oriya, 0x0B3E => Script::Oriya, 0x0B3F => Script::Oriya, 0x0B40 => Script::Oriya, 0x0B41..=0x0B44 => Script::Oriya, 0x0B47..=0x0B48 => Script::Oriya, 0x0B4B..=0x0B4C => Script::Oriya, 0x0B4D => Script::Oriya, 0x0B56 => Script::Oriya, 0x0B57 => Script::Oriya, 0x0B5C..=0x0B5D => Script::Oriya, 0x0B5F..=0x0B61 => Script::Oriya, 0x0B62..=0x0B63 => Script::Oriya, 0x0B66..=0x0B6F => Script::Oriya, 0x0B70 => Script::Oriya, 0x0B71 => Script::Oriya, 0x0B72..=0x0B77 => Script::Oriya, 0x0B82 => Script::Tamil, 0x0B83 => Script::Tamil, 0x0B85..=0x0B8A => Script::Tamil, 0x0B8E..=0x0B90 => Script::Tamil, 0x0B92..=0x0B95 => Script::Tamil, 0x0B99..=0x0B9A => Script::Tamil, 0x0B9C => Script::Tamil, 0x0B9E..=0x0B9F => Script::Tamil, 0x0BA3..=0x0BA4 => Script::Tamil, 0x0BA8..=0x0BAA => Script::Tamil, 0x0BAE..=0x0BB9 => Script::Tamil, 0x0BBE..=0x0BBF => Script::Tamil, 0x0BC0 => Script::Tamil, 0x0BC1..=0x0BC2 => Script::Tamil, 0x0BC6..=0x0BC8 => Script::Tamil, 0x0BCA..=0x0BCC => Script::Tamil, 0x0BCD => 
Script::Tamil, 0x0BD0 => Script::Tamil, 0x0BD7 => Script::Tamil, 0x0BE6..=0x0BEF => Script::Tamil, 0x0BF0..=0x0BF2 => Script::Tamil, 0x0BF3..=0x0BF8 => Script::Tamil, 0x0BF9 => Script::Tamil, 0x0BFA => Script::Tamil, 0x0C00 => Script::Telugu, 0x0C01..=0x0C03 => Script::Telugu, 0x0C05..=0x0C0C => Script::Telugu, 0x0C0E..=0x0C10 => Script::Telugu, 0x0C12..=0x0C28 => Script::Telugu, 0x0C2A..=0x0C39 => Script::Telugu, 0x0C3D => Script::Telugu, 0x0C3E..=0x0C40 => Script::Telugu, 0x0C41..=0x0C44 => Script::Telugu, 0x0C46..=0x0C48 => Script::Telugu, 0x0C4A..=0x0C4D => Script::Telugu, 0x0C55..=0x0C56 => Script::Telugu, 0x0C58..=0x0C5A => Script::Telugu, 0x0C60..=0x0C61 => Script::Telugu, 0x0C62..=0x0C63 => Script::Telugu, 0x0C66..=0x0C6F => Script::Telugu, 0x0C78..=0x0C7E => Script::Telugu, 0x0C7F => Script::Telugu, 0x0C80 => Script::Kannada, 0x0C81 => Script::Kannada, 0x0C82..=0x0C83 => Script::Kannada, 0x0C85..=0x0C8C => Script::Kannada, 0x0C8E..=0x0C90 => Script::Kannada, 0x0C92..=0x0CA8 => Script::Kannada, 0x0CAA..=0x0CB3 => Script::Kannada, 0x0CB5..=0x0CB9 => Script::Kannada, 0x0CBC => Script::Kannada, 0x0CBD => Script::Kannada, 0x0CBE => Script::Kannada, 0x0CBF => Script::Kannada, 0x0CC0..=0x0CC4 => Script::Kannada, 0x0CC6 => Script::Kannada, 0x0CC7..=0x0CC8 => Script::Kannada, 0x0CCA..=0x0CCB => Script::Kannada, 0x0CCC..=0x0CCD => Script::Kannada, 0x0CD5..=0x0CD6 => Script::Kannada, 0x0CDE => Script::Kannada, 0x0CE0..=0x0CE1 => Script::Kannada, 0x0CE2..=0x0CE3 => Script::Kannada, 0x0CE6..=0x0CEF => Script::Kannada, 0x0CF1..=0x0CF2 => Script::Kannada, 0x0D01 => Script::Malayalam, 0x0D02..=0x0D03 => Script::Malayalam, 0x0D05..=0x0D0C => Script::Malayalam, 0x0D0E..=0x0D10 => Script::Malayalam, 0x0D12..=0x0D3A => Script::Malayalam, 0x0D3D => Script::Malayalam, 0x0D3E..=0x0D40 => Script::Malayalam, 0x0D41..=0x0D44 => Script::Malayalam, 0x0D46..=0x0D48 => Script::Malayalam, 0x0D4A..=0x0D4C => Script::Malayalam, 0x0D4D => Script::Malayalam, 0x0D4E => Script::Malayalam, 0x0D4F => Script::Malayalam, 0x0D54..=0x0D56 => Script::Malayalam, 0x0D57 => Script::Malayalam, 0x0D58..=0x0D5E => Script::Malayalam, 0x0D5F..=0x0D61 => Script::Malayalam, 0x0D62..=0x0D63 => Script::Malayalam, 0x0D66..=0x0D6F => Script::Malayalam, 0x0D70..=0x0D78 => Script::Malayalam, 0x0D79 => Script::Malayalam, 0x0D7A..=0x0D7F => Script::Malayalam, 0x0D82..=0x0D83 => Script::Sinhala, 0x0D85..=0x0D96 => Script::Sinhala, 0x0D9A..=0x0DB1 => Script::Sinhala, 0x0DB3..=0x0DBB => Script::Sinhala, 0x0DBD => Script::Sinhala, 0x0DC0..=0x0DC6 => Script::Sinhala, 0x0DCA => Script::Sinhala, 0x0DCF..=0x0DD1 => Script::Sinhala, 0x0DD2..=0x0DD4 => Script::Sinhala, 0x0DD6 => Script::Sinhala, 0x0DD8..=0x0DDF => Script::Sinhala, 0x0DE6..=0x0DEF => Script::Sinhala, 0x0DF2..=0x0DF3 => Script::Sinhala, 0x0DF4 => Script::Sinhala, 0x111E1..=0x111F4 => Script::Sinhala, 0x0E01..=0x0E30 => Script::Thai, 0x0E31 => Script::Thai, 0x0E32..=0x0E33 => Script::Thai, 0x0E34..=0x0E3A => Script::Thai, 0x0E40..=0x0E45 => Script::Thai, 0x0E46 => Script::Thai, 0x0E47..=0x0E4E => Script::Thai, 0x0E4F => Script::Thai, 0x0E50..=0x0E59 => Script::Thai, 0x0E5A..=0x0E5B => Script::Thai, 0x0E81..=0x0E82 => Script::Lao, 0x0E84 => Script::Lao, 0x0E87..=0x0E88 => Script::Lao, 0x0E8A => Script::Lao, 0x0E8D => Script::Lao, 0x0E94..=0x0E97 => Script::Lao, 0x0E99..=0x0E9F => Script::Lao, 0x0EA1..=0x0EA3 => Script::Lao, 0x0EA5 => Script::Lao, 0x0EA7 => Script::Lao, 0x0EAA..=0x0EAB => Script::Lao, 0x0EAD..=0x0EB0 => Script::Lao, 0x0EB1 => Script::Lao, 0x0EB2..=0x0EB3 => Script::Lao, 
0x0EB4..=0x0EB9 => Script::Lao, 0x0EBB..=0x0EBC => Script::Lao, 0x0EBD => Script::Lao, 0x0EC0..=0x0EC4 => Script::Lao, 0x0EC6 => Script::Lao, 0x0EC8..=0x0ECD => Script::Lao, 0x0ED0..=0x0ED9 => Script::Lao, 0x0EDC..=0x0EDF => Script::Lao, 0x0F00 => Script::Tibetan, 0x0F01..=0x0F03 => Script::Tibetan, 0x0F04..=0x0F12 => Script::Tibetan, 0x0F13 => Script::Tibetan, 0x0F14 => Script::Tibetan, 0x0F15..=0x0F17 => Script::Tibetan, 0x0F18..=0x0F19 => Script::Tibetan, 0x0F1A..=0x0F1F => Script::Tibetan, 0x0F20..=0x0F29 => Script::Tibetan, 0x0F2A..=0x0F33 => Script::Tibetan, 0x0F34 => Script::Tibetan, 0x0F35 => Script::Tibetan, 0x0F36 => Script::Tibetan, 0x0F37 => Script::Tibetan, 0x0F38 => Script::Tibetan, 0x0F39 => Script::Tibetan, 0x0F3A => Script::Tibetan, 0x0F3B => Script::Tibetan, 0x0F3C => Script::Tibetan, 0x0F3D => Script::Tibetan, 0x0F3E..=0x0F3F => Script::Tibetan, 0x0F40..=0x0F47 => Script::Tibetan, 0x0F49..=0x0F6C => Script::Tibetan, 0x0F71..=0x0F7E => Script::Tibetan, 0x0F7F => Script::Tibetan, 0x0F80..=0x0F84 => Script::Tibetan, 0x0F85 => Script::Tibetan, 0x0F86..=0x0F87 => Script::Tibetan, 0x0F88..=0x0F8C => Script::Tibetan, 0x0F8D..=0x0F97 => Script::Tibetan, 0x0F99..=0x0FBC => Script::Tibetan, 0x0FBE..=0x0FC5 => Script::Tibetan, 0x0FC6 => Script::Tibetan, 0x0FC7..=0x0FCC => Script::Tibetan, 0x0FCE..=0x0FCF => Script::Tibetan, 0x0FD0..=0x0FD4 => Script::Tibetan, 0x0FD9..=0x0FDA => Script::Tibetan, 0x1000..=0x102A => Script::Myanmar, 0x102B..=0x102C => Script::Myanmar, 0x102D..=0x1030 => Script::Myanmar, 0x1031 => Script::Myanmar, 0x1032..=0x1037 => Script::Myanmar, 0x1038 => Script::Myanmar, 0x1039..=0x103A => Script::Myanmar, 0x103B..=0x103C => Script::Myanmar, 0x103D..=0x103E => Script::Myanmar, 0x103F => Script::Myanmar, 0x1040..=0x1049 => Script::Myanmar, 0x104A..=0x104F => Script::Myanmar, 0x1050..=0x1055 => Script::Myanmar, 0x1056..=0x1057 => Script::Myanmar, 0x1058..=0x1059 => Script::Myanmar, 0x105A..=0x105D => Script::Myanmar, 0x105E..=0x1060 => Script::Myanmar, 0x1061 => Script::Myanmar, 0x1062..=0x1064 => Script::Myanmar, 0x1065..=0x1066 => Script::Myanmar, 0x1067..=0x106D => Script::Myanmar, 0x106E..=0x1070 => Script::Myanmar, 0x1071..=0x1074 => Script::Myanmar, 0x1075..=0x1081 => Script::Myanmar, 0x1082 => Script::Myanmar, 0x1083..=0x1084 => Script::Myanmar, 0x1085..=0x1086 => Script::Myanmar, 0x1087..=0x108C => Script::Myanmar, 0x108D => Script::Myanmar, 0x108E => Script::Myanmar, 0x108F => Script::Myanmar, 0x1090..=0x1099 => Script::Myanmar, 0x109A..=0x109C => Script::Myanmar, 0x109D => Script::Myanmar, 0x109E..=0x109F => Script::Myanmar, 0xA9E0..=0xA9E4 => Script::Myanmar, 0xA9E5 => Script::Myanmar, 0xA9E6 => Script::Myanmar, 0xA9E7..=0xA9EF => Script::Myanmar, 0xA9F0..=0xA9F9 => Script::Myanmar, 0xA9FA..=0xA9FE => Script::Myanmar, 0xAA60..=0xAA6F => Script::Myanmar, 0xAA70 => Script::Myanmar, 0xAA71..=0xAA76 => Script::Myanmar, 0xAA77..=0xAA79 => Script::Myanmar, 0xAA7A => Script::Myanmar, 0xAA7B => Script::Myanmar, 0xAA7C => Script::Myanmar, 0xAA7D => Script::Myanmar, 0xAA7E..=0xAA7F => Script::Myanmar, 0x10A0..=0x10C5 => Script::Georgian, 0x10C7 => Script::Georgian, 0x10CD => Script::Georgian, 0x10D0..=0x10FA => Script::Georgian, 0x10FC => Script::Georgian, 0x10FD..=0x10FF => Script::Georgian, 0x2D00..=0x2D25 => Script::Georgian, 0x2D27 => Script::Georgian, 0x2D2D => Script::Georgian, 0x1100..=0x11FF => Script::Hangul, 0x302E..=0x302F => Script::Hangul, 0x3131..=0x318E => Script::Hangul, 0x3200..=0x321E => Script::Hangul, 0x3260..=0x327E => Script::Hangul, 
0xA960..=0xA97C => Script::Hangul, 0xAC00..=0xD7A3 => Script::Hangul, 0xD7B0..=0xD7C6 => Script::Hangul, 0xD7CB..=0xD7FB => Script::Hangul, 0xFFA0..=0xFFBE => Script::Hangul, 0xFFC2..=0xFFC7 => Script::Hangul, 0xFFCA..=0xFFCF => Script::Hangul, 0xFFD2..=0xFFD7 => Script::Hangul, 0xFFDA..=0xFFDC => Script::Hangul, 0x1200..=0x1248 => Script::Ethiopic, 0x124A..=0x124D => Script::Ethiopic, 0x1250..=0x1256 => Script::Ethiopic, 0x1258 => Script::Ethiopic, 0x125A..=0x125D => Script::Ethiopic, 0x1260..=0x1288 => Script::Ethiopic, 0x128A..=0x128D => Script::Ethiopic, 0x1290..=0x12B0 => Script::Ethiopic, 0x12B2..=0x12B5 => Script::Ethiopic, 0x12B8..=0x12BE => Script::Ethiopic, 0x12C0 => Script::Ethiopic, 0x12C2..=0x12C5 => Script::Ethiopic, 0x12C8..=0x12D6 => Script::Ethiopic, 0x12D8..=0x1310 => Script::Ethiopic, 0x1312..=0x1315 => Script::Ethiopic, 0x1318..=0x135A => Script::Ethiopic, 0x135D..=0x135F => Script::Ethiopic, 0x1360..=0x1368 => Script::Ethiopic, 0x1369..=0x137C => Script::Ethiopic, 0x1380..=0x138F => Script::Ethiopic, 0x1390..=0x1399 => Script::Ethiopic, 0x2D80..=0x2D96 => Script::Ethiopic, 0x2DA0..=0x2DA6 => Script::Ethiopic, 0x2DA8..=0x2DAE => Script::Ethiopic, 0x2DB0..=0x2DB6 => Script::Ethiopic, 0x2DB8..=0x2DBE => Script::Ethiopic, 0x2DC0..=0x2DC6 => Script::Ethiopic, 0x2DC8..=0x2DCE => Script::Ethiopic, 0x2DD0..=0x2DD6 => Script::Ethiopic, 0x2DD8..=0x2DDE => Script::Ethiopic, 0xAB01..=0xAB06 => Script::Ethiopic, 0xAB09..=0xAB0E => Script::Ethiopic, 0xAB11..=0xAB16 => Script::Ethiopic, 0xAB20..=0xAB26 => Script::Ethiopic, 0xAB28..=0xAB2E => Script::Ethiopic, 0x13A0..=0x13F5 => Script::Cherokee, 0x13F8..=0x13FD => Script::Cherokee, 0xAB70..=0xABBF => Script::Cherokee, 0x1400 => Script::CanadianAboriginal, 0x1401..=0x166C => Script::CanadianAboriginal, 0x166D..=0x166E => Script::CanadianAboriginal, 0x166F..=0x167F => Script::CanadianAboriginal, 0x18B0..=0x18F5 => Script::CanadianAboriginal, 0x1680 => Script::Ogham, 0x1681..=0x169A => Script::Ogham, 0x169B => Script::Ogham, 0x169C => Script::Ogham, 0x16A0..=0x16EA => Script::Runic, 0x16EE..=0x16F0 => Script::Runic, 0x16F1..=0x16F8 => Script::Runic, 0x1780..=0x17B3 => Script::Khmer, 0x17B4..=0x17B5 => Script::Khmer, 0x17B6 => Script::Khmer, 0x17B7..=0x17BD => Script::Khmer, 0x17BE..=0x17C5 => Script::Khmer, 0x17C6 => Script::Khmer, 0x17C7..=0x17C8 => Script::Khmer, 0x17C9..=0x17D3 => Script::Khmer, 0x17D4..=0x17D6 => Script::Khmer, 0x17D7 => Script::Khmer, 0x17D8..=0x17DA => Script::Khmer, 0x17DB => Script::Khmer, 0x17DC => Script::Khmer, 0x17DD => Script::Khmer, 0x17E0..=0x17E9 => Script::Khmer, 0x17F0..=0x17F9 => Script::Khmer, 0x19E0..=0x19FF => Script::Khmer, 0x1800..=0x1801 => Script::Mongolian, 0x1804 => Script::Mongolian, 0x1806 => Script::Mongolian, 0x1807..=0x180A => Script::Mongolian, 0x180B..=0x180D => Script::Mongolian, 0x180E => Script::Mongolian, 0x1810..=0x1819 => Script::Mongolian, 0x1820..=0x1842 => Script::Mongolian, 0x1843 => Script::Mongolian, 0x1844..=0x1877 => Script::Mongolian, 0x1880..=0x1884 => Script::Mongolian, 0x1885..=0x1886 => Script::Mongolian, 0x1887..=0x18A8 => Script::Mongolian, 0x18A9 => Script::Mongolian, 0x18AA => Script::Mongolian, 0x11660..=0x1166C => Script::Mongolian, 0x3041..=0x3096 => Script::Hiragana, 0x309D..=0x309E => Script::Hiragana, 0x309F => Script::Hiragana, 0x1B001 => Script::Hiragana, 0x1F200 => Script::Hiragana, 0x30A1..=0x30FA => Script::Katakana, 0x30FD..=0x30FE => Script::Katakana, 0x30FF => Script::Katakana, 0x31F0..=0x31FF => Script::Katakana, 0x32D0..=0x32FE => 
Script::Katakana, 0x3300..=0x3357 => Script::Katakana, 0xFF66..=0xFF6F => Script::Katakana, 0xFF71..=0xFF9D => Script::Katakana, 0x1B000 => Script::Katakana, 0x02EA..=0x02EB => Script::Bopomofo, 0x3105..=0x312D => Script::Bopomofo, 0x31A0..=0x31BA => Script::Bopomofo, 0x2E80..=0x2E99 => Script::Han, 0x2E9B..=0x2EF3 => Script::Han, 0x2F00..=0x2FD5 => Script::Han, 0x3005 => Script::Han, 0x3007 => Script::Han, 0x3021..=0x3029 => Script::Han, 0x3038..=0x303A => Script::Han, 0x303B => Script::Han, 0x3400..=0x4DB5 => Script::Han, 0x4E00..=0x9FD5 => Script::Han, 0xF900..=0xFA6D => Script::Han, 0xFA70..=0xFAD9 => Script::Han, 0x20000..=0x2A6D6 => Script::Han, 0x2A700..=0x2B734 => Script::Han, 0x2B740..=0x2B81D => Script::Han, 0x2B820..=0x2CEA1 => Script::Han, 0x2F800..=0x2FA1D => Script::Han, 0xA000..=0xA014 => Script::Yi, 0xA015 => Script::Yi, 0xA016..=0xA48C => Script::Yi, 0xA490..=0xA4C6 => Script::Yi, 0x10300..=0x1031F => Script::OldItalic, 0x10320..=0x10323 => Script::OldItalic, 0x10330..=0x10340 => Script::Gothic, 0x10341 => Script::Gothic, 0x10342..=0x10349 => Script::Gothic, 0x1034A => Script::Gothic, 0x10400..=0x1044F => Script::Deseret, 0x0300..=0x036F => Script::Inherited, 0x0485..=0x0486 => Script::Inherited, 0x064B..=0x0655 => Script::Inherited, 0x0670 => Script::Inherited, 0x0951..=0x0952 => Script::Inherited, 0x1AB0..=0x1ABD => Script::Inherited, 0x1ABE => Script::Inherited, 0x1CD0..=0x1CD2 => Script::Inherited, 0x1CD4..=0x1CE0 => Script::Inherited, 0x1CE2..=0x1CE8 => Script::Inherited, 0x1CED => Script::Inherited, 0x1CF4 => Script::Inherited, 0x1CF8..=0x1CF9 => Script::Inherited, 0x1DC0..=0x1DF5 => Script::Inherited, 0x1DFB..=0x1DFF => Script::Inherited, 0x200C..=0x200D => Script::Inherited, 0x20D0..=0x20DC => Script::Inherited, 0x20DD..=0x20E0 => Script::Inherited, 0x20E1 => Script::Inherited, 0x20E2..=0x20E4 => Script::Inherited, 0x20E5..=0x20F0 => Script::Inherited, 0x302A..=0x302D => Script::Inherited, 0x3099..=0x309A => Script::Inherited, 0xFE00..=0xFE0F => Script::Inherited, 0xFE20..=0xFE2D => Script::Inherited, 0x101FD => Script::Inherited, 0x102E0 => Script::Inherited, 0x1D167..=0x1D169 => Script::Inherited, 0x1D17B..=0x1D182 => Script::Inherited, 0x1D185..=0x1D18B => Script::Inherited, 0x1D1AA..=0x1D1AD => Script::Inherited, 0xE0100..=0xE01EF => Script::Inherited, 0x1700..=0x170C => Script::Tagalog, 0x170E..=0x1711 => Script::Tagalog, 0x1712..=0x1714 => Script::Tagalog, 0x1720..=0x1731 => Script::Hanunoo, 0x1732..=0x1734 => Script::Hanunoo, 0x1740..=0x1751 => Script::Buhid, 0x1752..=0x1753 => Script::Buhid, 0x1760..=0x176C => Script::Tagbanwa, 0x176E..=0x1770 => Script::Tagbanwa, 0x1772..=0x1773 => Script::Tagbanwa, 0x1900..=0x191E => Script::Limbu, 0x1920..=0x1922 => Script::Limbu, 0x1923..=0x1926 => Script::Limbu, 0x1927..=0x1928 => Script::Limbu, 0x1929..=0x192B => Script::Limbu, 0x1930..=0x1931 => Script::Limbu, 0x1932 => Script::Limbu, 0x1933..=0x1938 => Script::Limbu, 0x1939..=0x193B => Script::Limbu, 0x1940 => Script::Limbu, 0x1944..=0x1945 => Script::Limbu, 0x1946..=0x194F => Script::Limbu, 0x1950..=0x196D => Script::TaiLe, 0x1970..=0x1974 => Script::TaiLe, 0x10000..=0x1000B => Script::LinearB, 0x1000D..=0x10026 => Script::LinearB, 0x10028..=0x1003A => Script::LinearB, 0x1003C..=0x1003D => Script::LinearB, 0x1003F..=0x1004D => Script::LinearB, 0x10050..=0x1005D => Script::LinearB, 0x10080..=0x100FA => Script::LinearB, 0x10380..=0x1039D => Script::Ugaritic, 0x1039F => Script::Ugaritic, 0x10450..=0x1047F => Script::Shavian, 0x10480..=0x1049D => Script::Osmanya, 
0x104A0..=0x104A9 => Script::Osmanya, 0x10800..=0x10805 => Script::Cypriot, 0x10808 => Script::Cypriot, 0x1080A..=0x10835 => Script::Cypriot, 0x10837..=0x10838 => Script::Cypriot, 0x1083C => Script::Cypriot, 0x1083F => Script::Cypriot, 0x2800..=0x28FF => Script::Braille, 0x1A00..=0x1A16 => Script::Buginese, 0x1A17..=0x1A18 => Script::Buginese, 0x1A19..=0x1A1A => Script::Buginese, 0x1A1B => Script::Buginese, 0x1A1E..=0x1A1F => Script::Buginese, 0x03E2..=0x03EF => Script::Coptic, 0x2C80..=0x2CE4 => Script::Coptic, 0x2CE5..=0x2CEA => Script::Coptic, 0x2CEB..=0x2CEE => Script::Coptic, 0x2CEF..=0x2CF1 => Script::Coptic, 0x2CF2..=0x2CF3 => Script::Coptic, 0x2CF9..=0x2CFC => Script::Coptic, 0x2CFD => Script::Coptic, 0x2CFE..=0x2CFF => Script::Coptic, 0x1980..=0x19AB => Script::NewTaiLue, 0x19B0..=0x19C9 => Script::NewTaiLue, 0x19D0..=0x19D9 => Script::NewTaiLue, 0x19DA => Script::NewTaiLue, 0x19DE..=0x19DF => Script::NewTaiLue, 0x2C00..=0x2C2E => Script::Glagolitic, 0x2C30..=0x2C5E => Script::Glagolitic, 0x1E000..=0x1E006 => Script::Glagolitic, 0x1E008..=0x1E018 => Script::Glagolitic, 0x1E01B..=0x1E021 => Script::Glagolitic, 0x1E023..=0x1E024 => Script::Glagolitic, 0x1E026..=0x1E02A => Script::Glagolitic, 0x2D30..=0x2D67 => Script::Tifinagh, 0x2D6F => Script::Tifinagh, 0x2D70 => Script::Tifinagh, 0x2D7F => Script::Tifinagh, 0xA800..=0xA801 => Script::SylotiNagri, 0xA802 => Script::SylotiNagri, 0xA803..=0xA805 => Script::SylotiNagri, 0xA806 => Script::SylotiNagri, 0xA807..=0xA80A => Script::SylotiNagri, 0xA80B => Script::SylotiNagri, 0xA80C..=0xA822 => Script::SylotiNagri, 0xA823..=0xA824 => Script::SylotiNagri, 0xA825..=0xA826 => Script::SylotiNagri, 0xA827 => Script::SylotiNagri, 0xA828..=0xA82B => Script::SylotiNagri, 0x103A0..=0x103C3 => Script::OldPersian, 0x103C8..=0x103CF => Script::OldPersian, 0x103D0 => Script::OldPersian, 0x103D1..=0x103D5 => Script::OldPersian, 0x10A00 => Script::Kharoshthi, 0x10A01..=0x10A03 => Script::Kharoshthi, 0x10A05..=0x10A06 => Script::Kharoshthi, 0x10A0C..=0x10A0F => Script::Kharoshthi, 0x10A10..=0x10A13 => Script::Kharoshthi, 0x10A15..=0x10A17 => Script::Kharoshthi, 0x10A19..=0x10A33 => Script::Kharoshthi, 0x10A38..=0x10A3A => Script::Kharoshthi, 0x10A3F => Script::Kharoshthi, 0x10A40..=0x10A47 => Script::Kharoshthi, 0x10A50..=0x10A58 => Script::Kharoshthi, 0x1B00..=0x1B03 => Script::Balinese, 0x1B04 => Script::Balinese, 0x1B05..=0x1B33 => Script::Balinese, 0x1B34 => Script::Balinese, 0x1B35 => Script::Balinese, 0x1B36..=0x1B3A => Script::Balinese, 0x1B3B => Script::Balinese, 0x1B3C => Script::Balinese, 0x1B3D..=0x1B41 => Script::Balinese, 0x1B42 => Script::Balinese, 0x1B43..=0x1B44 => Script::Balinese, 0x1B45..=0x1B4B => Script::Balinese, 0x1B50..=0x1B59 => Script::Balinese, 0x1B5A..=0x1B60 => Script::Balinese, 0x1B61..=0x1B6A => Script::Balinese, 0x1B6B..=0x1B73 => Script::Balinese, 0x1B74..=0x1B7C => Script::Balinese, 0x12000..=0x12399 => Script::Cuneiform, 0x12400..=0x1246E => Script::Cuneiform, 0x12470..=0x12474 => Script::Cuneiform, 0x12480..=0x12543 => Script::Cuneiform, 0x10900..=0x10915 => Script::Phoenician, 0x10916..=0x1091B => Script::Phoenician, 0x1091F => Script::Phoenician, 0xA840..=0xA873 => Script::PhagsPa, 0xA874..=0xA877 => Script::PhagsPa, 0x07C0..=0x07C9 => Script::Nko, 0x07CA..=0x07EA => Script::Nko, 0x07EB..=0x07F3 => Script::Nko, 0x07F4..=0x07F5 => Script::Nko, 0x07F6 => Script::Nko, 0x07F7..=0x07F9 => Script::Nko, 0x07FA => Script::Nko, 0x1B80..=0x1B81 => Script::Sundanese, 0x1B82 => Script::Sundanese, 0x1B83..=0x1BA0 => 
Script::Sundanese, 0x1BA1 => Script::Sundanese, 0x1BA2..=0x1BA5 => Script::Sundanese, 0x1BA6..=0x1BA7 => Script::Sundanese, 0x1BA8..=0x1BA9 => Script::Sundanese, 0x1BAA => Script::Sundanese, 0x1BAB..=0x1BAD => Script::Sundanese, 0x1BAE..=0x1BAF => Script::Sundanese, 0x1BB0..=0x1BB9 => Script::Sundanese, 0x1BBA..=0x1BBF => Script::Sundanese, 0x1CC0..=0x1CC7 => Script::Sundanese, 0x1C00..=0x1C23 => Script::Lepcha, 0x1C24..=0x1C2B => Script::Lepcha, 0x1C2C..=0x1C33 => Script::Lepcha, 0x1C34..=0x1C35 => Script::Lepcha, 0x1C36..=0x1C37 => Script::Lepcha, 0x1C3B..=0x1C3F => Script::Lepcha, 0x1C40..=0x1C49 => Script::Lepcha, 0x1C4D..=0x1C4F => Script::Lepcha, 0x1C50..=0x1C59 => Script::OlChiki, 0x1C5A..=0x1C77 => Script::OlChiki, 0x1C78..=0x1C7D => Script::OlChiki, 0x1C7E..=0x1C7F => Script::OlChiki, 0xA500..=0xA60B => Script::Vai, 0xA60C => Script::Vai, 0xA60D..=0xA60F => Script::Vai, 0xA610..=0xA61F => Script::Vai, 0xA620..=0xA629 => Script::Vai, 0xA62A..=0xA62B => Script::Vai, 0xA880..=0xA881 => Script::Saurashtra, 0xA882..=0xA8B3 => Script::Saurashtra, 0xA8B4..=0xA8C3 => Script::Saurashtra, 0xA8C4..=0xA8C5 => Script::Saurashtra, 0xA8CE..=0xA8CF => Script::Saurashtra, 0xA8D0..=0xA8D9 => Script::Saurashtra, 0xA900..=0xA909 => Script::KayahLi, 0xA90A..=0xA925 => Script::KayahLi, 0xA926..=0xA92D => Script::KayahLi, 0xA92F => Script::KayahLi, 0xA930..=0xA946 => Script::Rejang, 0xA947..=0xA951 => Script::Rejang, 0xA952..=0xA953 => Script::Rejang, 0xA95F => Script::Rejang, 0x10280..=0x1029C => Script::Lycian, 0x102A0..=0x102D0 => Script::Carian, 0x10920..=0x10939 => Script::Lydian, 0x1093F => Script::Lydian, 0xAA00..=0xAA28 => Script::Cham, 0xAA29..=0xAA2E => Script::Cham, 0xAA2F..=0xAA30 => Script::Cham, 0xAA31..=0xAA32 => Script::Cham, 0xAA33..=0xAA34 => Script::Cham, 0xAA35..=0xAA36 => Script::Cham, 0xAA40..=0xAA42 => Script::Cham, 0xAA43 => Script::Cham, 0xAA44..=0xAA4B => Script::Cham, 0xAA4C => Script::Cham, 0xAA4D => Script::Cham, 0xAA50..=0xAA59 => Script::Cham, 0xAA5C..=0xAA5F => Script::Cham, 0x1A20..=0x1A54 => Script::TaiTham, 0x1A55 => Script::TaiTham, 0x1A56 => Script::TaiTham, 0x1A57 => Script::TaiTham, 0x1A58..=0x1A5E => Script::TaiTham, 0x1A60 => Script::TaiTham, 0x1A61 => Script::TaiTham, 0x1A62 => Script::TaiTham, 0x1A63..=0x1A64 => Script::TaiTham, 0x1A65..=0x1A6C => Script::TaiTham, 0x1A6D..=0x1A72 => Script::TaiTham, 0x1A73..=0x1A7C => Script::TaiTham, 0x1A7F => Script::TaiTham, 0x1A80..=0x1A89 => Script::TaiTham, 0x1A90..=0x1A99 => Script::TaiTham, 0x1AA0..=0x1AA6 => Script::TaiTham, 0x1AA7 => Script::TaiTham, 0x1AA8..=0x1AAD => Script::TaiTham, 0xAA80..=0xAAAF => Script::TaiViet, 0xAAB0 => Script::TaiViet, 0xAAB1 => Script::TaiViet, 0xAAB2..=0xAAB4 => Script::TaiViet, 0xAAB5..=0xAAB6 => Script::TaiViet, 0xAAB7..=0xAAB8 => Script::TaiViet, 0xAAB9..=0xAABD => Script::TaiViet, 0xAABE..=0xAABF => Script::TaiViet, 0xAAC0 => Script::TaiViet, 0xAAC1 => Script::TaiViet, 0xAAC2 => Script::TaiViet, 0xAADB..=0xAADC => Script::TaiViet, 0xAADD => Script::TaiViet, 0xAADE..=0xAADF => Script::TaiViet, 0x10B00..=0x10B35 => Script::Avestan, 0x10B39..=0x10B3F => Script::Avestan, 0x13000..=0x1342E => Script::EgyptianHieroglyphs, 0x0800..=0x0815 => Script::Samaritan, 0x0816..=0x0819 => Script::Samaritan, 0x081A => Script::Samaritan, 0x081B..=0x0823 => Script::Samaritan, 0x0824 => Script::Samaritan, 0x0825..=0x0827 => Script::Samaritan, 0x0828 => Script::Samaritan, 0x0829..=0x082D => Script::Samaritan, 0x0830..=0x083E => Script::Samaritan, 0xA4D0..=0xA4F7 => Script::Lisu, 0xA4F8..=0xA4FD => 
Script::Lisu, 0xA4FE..=0xA4FF => Script::Lisu, 0xA6A0..=0xA6E5 => Script::Bamum, 0xA6E6..=0xA6EF => Script::Bamum, 0xA6F0..=0xA6F1 => Script::Bamum, 0xA6F2..=0xA6F7 => Script::Bamum, 0x16800..=0x16A38 => Script::Bamum, 0xA980..=0xA982 => Script::Javanese, 0xA983 => Script::Javanese, 0xA984..=0xA9B2 => Script::Javanese, 0xA9B3 => Script::Javanese, 0xA9B4..=0xA9B5 => Script::Javanese, 0xA9B6..=0xA9B9 => Script::Javanese, 0xA9BA..=0xA9BB => Script::Javanese, 0xA9BC => Script::Javanese, 0xA9BD..=0xA9C0 => Script::Javanese, 0xA9C1..=0xA9CD => Script::Javanese, 0xA9D0..=0xA9D9 => Script::Javanese, 0xA9DE..=0xA9DF => Script::Javanese, 0xAAE0..=0xAAEA => Script::MeeteiMayek, 0xAAEB => Script::MeeteiMayek, 0xAAEC..=0xAAED => Script::MeeteiMayek, 0xAAEE..=0xAAEF => Script::MeeteiMayek, 0xAAF0..=0xAAF1 => Script::MeeteiMayek, 0xAAF2 => Script::MeeteiMayek, 0xAAF3..=0xAAF4 => Script::MeeteiMayek, 0xAAF5 => Script::MeeteiMayek, 0xAAF6 => Script::MeeteiMayek, 0xABC0..=0xABE2 => Script::MeeteiMayek, 0xABE3..=0xABE4 => Script::MeeteiMayek, 0xABE5 => Script::MeeteiMayek, 0xABE6..=0xABE7 => Script::MeeteiMayek, 0xABE8 => Script::MeeteiMayek, 0xABE9..=0xABEA => Script::MeeteiMayek, 0xABEB => Script::MeeteiMayek, 0xABEC => Script::MeeteiMayek, 0xABED => Script::MeeteiMayek, 0xABF0..=0xABF9 => Script::MeeteiMayek, 0x10840..=0x10855 => Script::ImperialAramaic, 0x10857 => Script::ImperialAramaic, 0x10858..=0x1085F => Script::ImperialAramaic, 0x10A60..=0x10A7C => Script::OldSouthArabian, 0x10A7D..=0x10A7E => Script::OldSouthArabian, 0x10A7F => Script::OldSouthArabian, 0x10B40..=0x10B55 => Script::InscriptionalParthian, 0x10B58..=0x10B5F => Script::InscriptionalParthian, 0x10B60..=0x10B72 => Script::InscriptionalPahlavi, 0x10B78..=0x10B7F => Script::InscriptionalPahlavi, 0x10C00..=0x10C48 => Script::OldTurkic, 0x11080..=0x11081 => Script::Kaithi, 0x11082 => Script::Kaithi, 0x11083..=0x110AF => Script::Kaithi, 0x110B0..=0x110B2 => Script::Kaithi, 0x110B3..=0x110B6 => Script::Kaithi, 0x110B7..=0x110B8 => Script::Kaithi, 0x110B9..=0x110BA => Script::Kaithi, 0x110BB..=0x110BC => Script::Kaithi, 0x110BD => Script::Kaithi, 0x110BE..=0x110C1 => Script::Kaithi, 0x1BC0..=0x1BE5 => Script::Batak, 0x1BE6 => Script::Batak, 0x1BE7 => Script::Batak, 0x1BE8..=0x1BE9 => Script::Batak, 0x1BEA..=0x1BEC => Script::Batak, 0x1BED => Script::Batak, 0x1BEE => Script::Batak, 0x1BEF..=0x1BF1 => Script::Batak, 0x1BF2..=0x1BF3 => Script::Batak, 0x1BFC..=0x1BFF => Script::Batak, 0x11000 => Script::Brahmi, 0x11001 => Script::Brahmi, 0x11002 => Script::Brahmi, 0x11003..=0x11037 => Script::Brahmi, 0x11038..=0x11046 => Script::Brahmi, 0x11047..=0x1104D => Script::Brahmi, 0x11052..=0x11065 => Script::Brahmi, 0x11066..=0x1106F => Script::Brahmi, 0x1107F => Script::Brahmi, 0x0840..=0x0858 => Script::Mandaic, 0x0859..=0x085B => Script::Mandaic, 0x085E => Script::Mandaic, 0x11100..=0x11102 => Script::Chakma, 0x11103..=0x11126 => Script::Chakma, 0x11127..=0x1112B => Script::Chakma, 0x1112C => Script::Chakma, 0x1112D..=0x11134 => Script::Chakma, 0x11136..=0x1113F => Script::Chakma, 0x11140..=0x11143 => Script::Chakma, 0x109A0..=0x109B7 => Script::MeroiticCursive, 0x109BC..=0x109BD => Script::MeroiticCursive, 0x109BE..=0x109BF => Script::MeroiticCursive, 0x109C0..=0x109CF => Script::MeroiticCursive, 0x109D2..=0x109FF => Script::MeroiticCursive, 0x10980..=0x1099F => Script::MeroiticHieroglyphs, 0x16F00..=0x16F44 => Script::Miao, 0x16F50 => Script::Miao, 0x16F51..=0x16F7E => Script::Miao, 0x16F8F..=0x16F92 => Script::Miao, 0x16F93..=0x16F9F => 
Script::Miao, 0x11180..=0x11181 => Script::Sharada, 0x11182 => Script::Sharada, 0x11183..=0x111B2 => Script::Sharada, 0x111B3..=0x111B5 => Script::Sharada, 0x111B6..=0x111BE => Script::Sharada, 0x111BF..=0x111C0 => Script::Sharada, 0x111C1..=0x111C4 => Script::Sharada, 0x111C5..=0x111C9 => Script::Sharada, 0x111CA..=0x111CC => Script::Sharada, 0x111CD => Script::Sharada, 0x111D0..=0x111D9 => Script::Sharada, 0x111DA => Script::Sharada, 0x111DB => Script::Sharada, 0x111DC => Script::Sharada, 0x111DD..=0x111DF => Script::Sharada, 0x110D0..=0x110E8 => Script::SoraSompeng, 0x110F0..=0x110F9 => Script::SoraSompeng, 0x11680..=0x116AA => Script::Takri, 0x116AB => Script::Takri, 0x116AC => Script::Takri, 0x116AD => Script::Takri, 0x116AE..=0x116AF => Script::Takri, 0x116B0..=0x116B5 => Script::Takri, 0x116B6 => Script::Takri, 0x116B7 => Script::Takri, 0x116C0..=0x116C9 => Script::Takri, 0x10530..=0x10563 => Script::CaucasianAlbanian, 0x1056F => Script::CaucasianAlbanian, 0x16AD0..=0x16AED => Script::BassaVah, 0x16AF0..=0x16AF4 => Script::BassaVah, 0x16AF5 => Script::BassaVah, 0x1BC00..=0x1BC6A => Script::Duployan, 0x1BC70..=0x1BC7C => Script::Duployan, 0x1BC80..=0x1BC88 => Script::Duployan, 0x1BC90..=0x1BC99 => Script::Duployan, 0x1BC9C => Script::Duployan, 0x1BC9D..=0x1BC9E => Script::Duployan, 0x1BC9F => Script::Duployan, 0x10500..=0x10527 => Script::Elbasan, 0x11300..=0x11301 => Script::Grantha, 0x11302..=0x11303 => Script::Grantha, 0x11305..=0x1130C => Script::Grantha, 0x1130F..=0x11310 => Script::Grantha, 0x11313..=0x11328 => Script::Grantha, 0x1132A..=0x11330 => Script::Grantha, 0x11332..=0x11333 => Script::Grantha, 0x11335..=0x11339 => Script::Grantha, 0x1133C => Script::Grantha, 0x1133D => Script::Grantha, 0x1133E..=0x1133F => Script::Grantha, 0x11340 => Script::Grantha, 0x11341..=0x11344 => Script::Grantha, 0x11347..=0x11348 => Script::Grantha, 0x1134B..=0x1134D => Script::Grantha, 0x11350 => Script::Grantha, 0x11357 => Script::Grantha, 0x1135D..=0x11361 => Script::Grantha, 0x11362..=0x11363 => Script::Grantha, 0x11366..=0x1136C => Script::Grantha, 0x11370..=0x11374 => Script::Grantha, 0x16B00..=0x16B2F => Script::PahawhHmong, 0x16B30..=0x16B36 => Script::PahawhHmong, 0x16B37..=0x16B3B => Script::PahawhHmong, 0x16B3C..=0x16B3F => Script::PahawhHmong, 0x16B40..=0x16B43 => Script::PahawhHmong, 0x16B44 => Script::PahawhHmong, 0x16B45 => Script::PahawhHmong, 0x16B50..=0x16B59 => Script::PahawhHmong, 0x16B5B..=0x16B61 => Script::PahawhHmong, 0x16B63..=0x16B77 => Script::PahawhHmong, 0x16B7D..=0x16B8F => Script::PahawhHmong, 0x11200..=0x11211 => Script::Khojki, 0x11213..=0x1122B => Script::Khojki, 0x1122C..=0x1122E => Script::Khojki, 0x1122F..=0x11231 => Script::Khojki, 0x11232..=0x11233 => Script::Khojki, 0x11234 => Script::Khojki, 0x11235 => Script::Khojki, 0x11236..=0x11237 => Script::Khojki, 0x11238..=0x1123D => Script::Khojki, 0x1123E => Script::Khojki, 0x10600..=0x10736 => Script::LinearA, 0x10740..=0x10755 => Script::LinearA, 0x10760..=0x10767 => Script::LinearA, 0x11150..=0x11172 => Script::Mahajani, 0x11173 => Script::Mahajani, 0x11174..=0x11175 => Script::Mahajani, 0x11176 => Script::Mahajani, 0x10AC0..=0x10AC7 => Script::Manichaean, 0x10AC8 => Script::Manichaean, 0x10AC9..=0x10AE4 => Script::Manichaean, 0x10AE5..=0x10AE6 => Script::Manichaean, 0x10AEB..=0x10AEF => Script::Manichaean, 0x10AF0..=0x10AF6 => Script::Manichaean, 0x1E800..=0x1E8C4 => Script::MendeKikakui, 0x1E8C7..=0x1E8CF => Script::MendeKikakui, 0x1E8D0..=0x1E8D6 => Script::MendeKikakui, 0x11600..=0x1162F => 
Script::Modi, 0x11630..=0x11632 => Script::Modi, 0x11633..=0x1163A => Script::Modi, 0x1163B..=0x1163C => Script::Modi, 0x1163D => Script::Modi, 0x1163E => Script::Modi, 0x1163F..=0x11640 => Script::Modi, 0x11641..=0x11643 => Script::Modi, 0x11644 => Script::Modi, 0x11650..=0x11659 => Script::Modi, 0x16A40..=0x16A5E => Script::Mro, 0x16A60..=0x16A69 => Script::Mro, 0x16A6E..=0x16A6F => Script::Mro, 0x10A80..=0x10A9C => Script::OldNorthArabian, 0x10A9D..=0x10A9F => Script::OldNorthArabian, 0x10880..=0x1089E => Script::Nabataean, 0x108A7..=0x108AF => Script::Nabataean, 0x10860..=0x10876 => Script::Palmyrene, 0x10877..=0x10878 => Script::Palmyrene, 0x10879..=0x1087F => Script::Palmyrene, 0x11AC0..=0x11AF8 => Script::PauCinHau, 0x10350..=0x10375 => Script::OldPermic, 0x10376..=0x1037A => Script::OldPermic, 0x10B80..=0x10B91 => Script::PsalterPahlavi, 0x10B99..=0x10B9C => Script::PsalterPahlavi, 0x10BA9..=0x10BAF => Script::PsalterPahlavi, 0x11580..=0x115AE => Script::Siddham, 0x115AF..=0x115B1 => Script::Siddham, 0x115B2..=0x115B5 => Script::Siddham, 0x115B8..=0x115BB => Script::Siddham, 0x115BC..=0x115BD => Script::Siddham, 0x115BE => Script::Siddham, 0x115BF..=0x115C0 => Script::Siddham, 0x115C1..=0x115D7 => Script::Siddham, 0x115D8..=0x115DB => Script::Siddham, 0x115DC..=0x115DD => Script::Siddham, 0x112B0..=0x112DE => Script::Khudawadi, 0x112DF => Script::Khudawadi, 0x112E0..=0x112E2 => Script::Khudawadi, 0x112E3..=0x112EA => Script::Khudawadi, 0x112F0..=0x112F9 => Script::Khudawadi, 0x11480..=0x114AF => Script::Tirhuta, 0x114B0..=0x114B2 => Script::Tirhuta, 0x114B3..=0x114B8 => Script::Tirhuta, 0x114B9 => Script::Tirhuta, 0x114BA => Script::Tirhuta, 0x114BB..=0x114BE => Script::Tirhuta, 0x114BF..=0x114C0 => Script::Tirhuta, 0x114C1 => Script::Tirhuta, 0x114C2..=0x114C3 => Script::Tirhuta, 0x114C4..=0x114C5 => Script::Tirhuta, 0x114C6 => Script::Tirhuta, 0x114C7 => Script::Tirhuta, 0x114D0..=0x114D9 => Script::Tirhuta, 0x118A0..=0x118DF => Script::WarangCiti, 0x118E0..=0x118E9 => Script::WarangCiti, 0x118EA..=0x118F2 => Script::WarangCiti, 0x118FF => Script::WarangCiti, 0x11700..=0x11719 => Script::Ahom, 0x1171D..=0x1171F => Script::Ahom, 0x11720..=0x11721 => Script::Ahom, 0x11722..=0x11725 => Script::Ahom, 0x11726 => Script::Ahom, 0x11727..=0x1172B => Script::Ahom, 0x11730..=0x11739 => Script::Ahom, 0x1173A..=0x1173B => Script::Ahom, 0x1173C..=0x1173E => Script::Ahom, 0x1173F => Script::Ahom, 0x14400..=0x14646 => Script::AnatolianHieroglyphs, 0x108E0..=0x108F2 => Script::Hatran, 0x108F4..=0x108F5 => Script::Hatran, 0x108FB..=0x108FF => Script::Hatran, 0x11280..=0x11286 => Script::Multani, 0x11288 => Script::Multani, 0x1128A..=0x1128D => Script::Multani, 0x1128F..=0x1129D => Script::Multani, 0x1129F..=0x112A8 => Script::Multani, 0x112A9 => Script::Multani, 0x10C80..=0x10CB2 => Script::OldHungarian, 0x10CC0..=0x10CF2 => Script::OldHungarian, 0x10CFA..=0x10CFF => Script::OldHungarian, 0x1D800..=0x1D9FF => Script::SignWriting, 0x1DA00..=0x1DA36 => Script::SignWriting, 0x1DA37..=0x1DA3A => Script::SignWriting, 0x1DA3B..=0x1DA6C => Script::SignWriting, 0x1DA6D..=0x1DA74 => Script::SignWriting, 0x1DA75 => Script::SignWriting, 0x1DA76..=0x1DA83 => Script::SignWriting, 0x1DA84 => Script::SignWriting, 0x1DA85..=0x1DA86 => Script::SignWriting, 0x1DA87..=0x1DA8B => Script::SignWriting, 0x1DA9B..=0x1DA9F => Script::SignWriting, 0x1DAA1..=0x1DAAF => Script::SignWriting, 0x1E900..=0x1E943 => Script::Adlam, 0x1E944..=0x1E94A => Script::Adlam, 0x1E950..=0x1E959 => Script::Adlam, 0x1E95E..=0x1E95F => 
Script::Adlam, 0x11C00..=0x11C08 => Script::Bhaiksuki, 0x11C0A..=0x11C2E => Script::Bhaiksuki, 0x11C2F => Script::Bhaiksuki, 0x11C30..=0x11C36 => Script::Bhaiksuki, 0x11C38..=0x11C3D => Script::Bhaiksuki, 0x11C3E => Script::Bhaiksuki, 0x11C3F => Script::Bhaiksuki, 0x11C40 => Script::Bhaiksuki, 0x11C41..=0x11C45 => Script::Bhaiksuki, 0x11C50..=0x11C59 => Script::Bhaiksuki, 0x11C5A..=0x11C6C => Script::Bhaiksuki, 0x11C70..=0x11C71 => Script::Marchen, 0x11C72..=0x11C8F => Script::Marchen, 0x11C92..=0x11CA7 => Script::Marchen, 0x11CA9 => Script::Marchen, 0x11CAA..=0x11CB0 => Script::Marchen, 0x11CB1 => Script::Marchen, 0x11CB2..=0x11CB3 => Script::Marchen, 0x11CB4 => Script::Marchen, 0x11CB5..=0x11CB6 => Script::Marchen, 0x11400..=0x11434 => Script::Newa, 0x11435..=0x11437 => Script::Newa, 0x11438..=0x1143F => Script::Newa, 0x11440..=0x11441 => Script::Newa, 0x11442..=0x11444 => Script::Newa, 0x11445 => Script::Newa, 0x11446 => Script::Newa, 0x11447..=0x1144A => Script::Newa, 0x1144B..=0x1144F => Script::Newa, 0x11450..=0x11459 => Script::Newa, 0x1145B => Script::Newa, 0x1145D => Script::Newa, 0x104B0..=0x104D3 => Script::Osage, 0x104D8..=0x104FB => Script::Osage, 0x16FE0 => Script::Tangut, 0x17000..=0x187EC => Script::Tangut, 0x18800..=0x18AF2 => Script::Tangut, _ => Script::Any, } } #[cfg(test)] mod tests { use super::*; #[test] fn test_unicode_script() { assert_eq!(Script::Han, get_script('京')); assert_eq!(Script::Han, get_script('太')); assert_eq!(Script::Hiragana, get_script('い')); assert_eq!(Script::Katakana, get_script('グ')); assert_eq!(Script::Common, get_script('ー')); assert_eq!(Script::Latin, get_script('a')); assert_eq!(Script::Latin, get_script('A')); assert_eq!(Script::Common, get_script('0')); assert_eq!(Script::Common, get_script('$')); assert_eq!(Script::Common, get_script('@')); assert_eq!(Script::Common, get_script('-')); assert_eq!(Script::Common, get_script(' ')); assert_eq!(Script::Common, get_script('�')); } }
tokenizers/tokenizers/src/pre_tokenizers/unicode_scripts/scripts.rs/0
{ "file_path": "tokenizers/tokenizers/src/pre_tokenizers/unicode_scripts/scripts.rs", "repo_id": "tokenizers", "token_count": 46440 }
242
use crate::Result;
use hf_hub::{api::sync::ApiBuilder, Repo, RepoType};
use std::collections::HashMap;
use std::path::PathBuf;

/// Defines the additional parameters available for the `from_pretrained` function
#[derive(Debug, Clone)]
pub struct FromPretrainedParameters {
    pub revision: String,
    pub user_agent: HashMap<String, String>,
    pub auth_token: Option<String>,
}

impl Default for FromPretrainedParameters {
    fn default() -> Self {
        Self {
            revision: "main".into(),
            user_agent: HashMap::new(),
            auth_token: None,
        }
    }
}

/// Downloads and caches the identified tokenizer if it exists on
/// the Hugging Face Hub, and returns a local path to the file
pub fn from_pretrained<S: AsRef<str>>(
    identifier: S,
    params: Option<FromPretrainedParameters>,
) -> Result<PathBuf> {
    let identifier: String = identifier.as_ref().to_string();

    let valid_chars = ['-', '_', '.', '/'];
    let is_valid_char = |x: char| x.is_alphanumeric() || valid_chars.contains(&x);

    let valid = identifier.chars().all(is_valid_char);
    let valid_chars_stringified = valid_chars
        .iter()
        .fold(vec![], |mut buf, x| {
            buf.push(format!("'{}'", x));
            buf
        })
        .join(", "); // "'/', '-', '_', '.'"
    if !valid {
        return Err(format!(
            "Model \"{}\" contains invalid characters, expected only alphanumeric or {valid_chars_stringified}",
            identifier
        )
        .into());
    }
    let params = params.unwrap_or_default();

    let revision = &params.revision;
    let valid_revision = revision.chars().all(is_valid_char);
    if !valid_revision {
        return Err(format!(
            "Revision \"{}\" contains invalid characters, expected only alphanumeric or {valid_chars_stringified}",
            revision
        )
        .into());
    }

    let mut builder = ApiBuilder::new();
    if let Some(token) = params.auth_token {
        builder = builder.with_token(Some(token));
    }
    let api = builder.build()?;
    let repo = Repo::with_revision(identifier, RepoType::Model, params.revision);
    let api = api.repo(repo);
    Ok(api.get("tokenizer.json")?)
}
tokenizers/tokenizers/src/utils/from_pretrained.rs/0
{ "file_path": "tokenizers/tokenizers/src/utils/from_pretrained.rs", "repo_id": "tokenizers", "token_count": 913 }
243
# Troubleshooting This is a document explaining how to deal with various issues on Circle-CI. The entries may include actual solutions or pointers to Issues that cover those. ## Circle CI * pytest worker runs out of resident RAM and gets killed by `cgroups`: https://github.com/huggingface/transformers/issues/11408
transformers/.circleci/TROUBLESHOOT.md/0
{ "file_path": "transformers/.circleci/TROUBLESHOOT.md", "repo_id": "transformers", "token_count": 80 }
244
FROM rocm/dev-ubuntu-22.04:5.6
LABEL maintainer="Hugging Face"

ARG DEBIAN_FRONTEND=noninteractive
ARG PYTORCH='2.1.1'
ARG TORCH_VISION='0.16.1'
ARG TORCH_AUDIO='2.1.1'
ARG ROCM='5.6'

RUN apt update && \
    apt install -y --no-install-recommends \
    libaio-dev \
    git \
    # These are required to build deepspeed.
    python3-dev \
    python-is-python3 \
    rocrand-dev \
    rocthrust-dev \
    hipsparse-dev \
    hipblas-dev \
    rocblas-dev && \
    apt clean && \
    rm -rf /var/lib/apt/lists/*

RUN python3 -m pip install --no-cache-dir --upgrade pip ninja "pydantic<2"
RUN python3 -m pip uninstall -y apex torch torchvision torchaudio
RUN python3 -m pip install torch==$PYTORCH torchvision==$TORCH_VISION torchaudio==$TORCH_AUDIO --index-url https://download.pytorch.org/whl/rocm$ROCM --no-cache-dir

# Pre-build DeepSpeed, so it's ready for testing (to avoid timeout)
RUN DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 python3 -m pip install deepspeed --global-option="build_ext" --global-option="-j8" --no-cache-dir -v --disable-pip-version-check 2>&1

ARG REF=main
WORKDIR /

# Invalidate docker cache from here if a new commit is available.
ADD https://api.github.com/repos/huggingface/transformers/git/refs/heads/main version.json
RUN git clone https://github.com/huggingface/transformers && cd transformers && git checkout $REF

RUN python3 -m pip install --no-cache-dir ./transformers[accelerate,testing,sentencepiece,sklearn]

# When installing in editable mode, `transformers` is not recognized as a package.
# This line must be added in order for python to be aware of transformers.
RUN cd transformers && python3 setup.py develop

RUN python3 -c "from deepspeed.launcher.runner import main"
transformers/docker/transformers-pytorch-deepspeed-amd-gpu/Dockerfile/0
{ "file_path": "transformers/docker/transformers-pytorch-deepspeed-amd-gpu/Dockerfile", "repo_id": "transformers", "token_count": 639 }
245
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# How to add a model to 🤗 Transformers?

The 🤗 Transformers library is often able to offer new models thanks to community contributions. But this can be a
challenging project and requires an in-depth knowledge of the 🤗 Transformers library and of the model to implement. At
Hugging Face, we're trying to empower more of the community to actively add models, and we've put together this guide
to walk you through the process of adding a PyTorch model (make sure you have
[PyTorch installed](https://pytorch.org/get-started/locally/)).

<Tip>

If you're interested in implementing a TensorFlow model, take a look at the
[How to convert a 🤗 Transformers model to TensorFlow](add_tensorflow_model) guide!

</Tip>

Along the way, you'll:

- get insights into open-source best practices
- understand the design principles behind one of the most popular deep learning libraries
- learn how to efficiently test large models
- learn how to integrate Python utilities like `black`, `ruff`, and `make fix-copies` to ensure clean and readable code

A member of the Hugging Face team will be available to help you along the way, so you'll never be alone. 🤗 ❤️

To get started, open a [New model addition](https://github.com/huggingface/transformers/issues/new?assignees=&labels=New+model&template=new-model-addition.yml)
issue for the model you want to see in 🤗 Transformers. If you're not especially picky about contributing a specific
model, you can filter by the [New model label](https://github.com/huggingface/transformers/labels/New%20model) to see
if there are any unclaimed model requests and work on one of those.

Once you've opened a new model request, the first step is to get familiar with 🤗 Transformers if you aren't already!

## General overview of 🤗 Transformers

First, you should get a general overview of 🤗 Transformers. 🤗 Transformers is a very opinionated library, so there is
a chance that you don't agree with some of the library's philosophies or design choices. From our experience, however,
we found that the fundamental design choices and philosophies of the library are crucial to efficiently scale 🤗
Transformers while keeping maintenance costs at a reasonable level.

A good first starting point to better understand the library is to read the [documentation of our philosophy](philosophy).
As a result of our way of working, there are some choices that we try to apply to all models:

- Composition is generally favored over abstraction
- Duplicating code is not always bad if it strongly improves the readability or accessibility of a model
- Model files are as self-contained as possible so that when you read the code of a specific model, you ideally only
  have to look into the respective `modeling_....py` file.

In our opinion, the library's code is not just a means to provide a product, *e.g.* the ability to use BERT for
inference, but also the very product that we want to improve. Hence, when adding a model, the user is not only the
person who will use your model, but also everybody who will read, try to understand, and possibly tweak your code.

With this in mind, let's go a bit deeper into the general library design.

### Overview of models

To successfully add a model, it is important to understand the interaction between your model and its config,
[`PreTrainedModel`], and [`PretrainedConfig`]. For exemplary purposes, we will call the model to be added to
🤗 Transformers `BrandNewBert`.

Let's take a look:

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers_overview.png"/>

As you can see, we do make use of inheritance in 🤗 Transformers, but we keep the level of abstraction to an absolute
minimum. There are never more than two levels of abstraction for any model in the library. `BrandNewBertModel` inherits
from `BrandNewBertPreTrainedModel`, which in turn inherits from [`PreTrainedModel`], and that's it. As a general rule,
we want to make sure that a new model only depends on [`PreTrainedModel`]. The important functionalities that are
automatically provided to every new model are [`~PreTrainedModel.from_pretrained`] and
[`~PreTrainedModel.save_pretrained`], which are used for serialization and deserialization. All of the other important
functionalities, such as `BrandNewBertModel.forward`, should be completely defined in the new
`modeling_brand_new_bert.py` script. Next, we want to make sure that a model with a specific head layer, such as
`BrandNewBertForMaskedLM`, does not inherit from `BrandNewBertModel`, but rather uses `BrandNewBertModel` as a
component that can be called in the forward pass, to keep the level of abstraction low. Every new model requires a
configuration class, called `BrandNewBertConfig`. This configuration is always stored as an attribute in
[`PreTrainedModel`] and can thus be accessed via the `config` attribute for all classes inheriting from
`BrandNewBertPreTrainedModel`:

```python
model = BrandNewBertModel.from_pretrained("brandy/brand_new_bert")
model.config  # model has access to its config
```

Similar to the model, the configuration inherits basic serialization and deserialization functionalities from
[`PretrainedConfig`]. Note that the configuration and the model are always serialized into two different formats - the
model to a *pytorch_model.bin* file and the configuration to a *config.json* file. Calling
[`~PreTrainedModel.save_pretrained`] will automatically call [`~PretrainedConfig.save_pretrained`], so that both the
model and the configuration are saved.
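To make this save/load contract concrete, here is a minimal round-trip sketch. `BrandNewBert*` is the placeholder model
used throughout this guide, so the snippet is illustrative rather than runnable as-is, and the directory path is just
an example:

```python
from transformers import BrandNewBertConfig, BrandNewBertModel

# Instantiate a randomly initialized model from a default configuration
config = BrandNewBertConfig()
model = BrandNewBertModel(config)

# Saving the model also writes its configuration:
# pytorch_model.bin and config.json end up in the same folder
model.save_pretrained("./brand_new_bert_checkpoint")

# Reloading restores both the weights and the configuration
reloaded = BrandNewBertModel.from_pretrained("./brand_new_bert_checkpoint")
assert reloaded.config.to_dict() == config.to_dict()
```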
### Code style

When coding your new model, keep in mind that Transformers is an opinionated library and we have a few quirks of our
own regarding how code should be written :-)

1. The forward pass of your model should be fully written in the modeling file while being fully independent of other
   models in the library. If you want to reuse a block from another model, copy the code and paste it with a
   `# Copied from` comment on top (see [here](https://github.com/huggingface/transformers/blob/v4.17.0/src/transformers/models/roberta/modeling_roberta.py#L160)
   for a good example and [here](pr_checks#check-copies) for more documentation on Copied from).
2. The code should be fully understandable, even by a non-native English speaker. This means you should pick
   descriptive variable names and avoid abbreviations. As an example, `activation` is preferred to `act`. One-letter
   variable names are strongly discouraged unless they are an index in a for loop.
3. More generally, we prefer longer explicit code to short magical one.
4. Avoid subclassing `nn.Sequential` in PyTorch; subclass `nn.Module` and write the forward pass instead, so that
   anyone using your code can quickly debug it by adding print statements or breakpoints.
5. Your function signatures should be type-annotated. For the rest, good variable names are much more readable and
   understandable than type annotations.

### Overview of tokenizers

Not quite ready yet :-( This section will be added soon!

## Step-by-step recipe to add a model to 🤗 Transformers

Everyone has different preferences when it comes to porting a model, so it can be very helpful to look at summaries of
how other contributors ported models to Hugging Face. Here is a list of community blog posts on how to port a model:

1. [Porting a GPT2 model](https://medium.com/huggingface/from-tensorflow-to-pytorch-265f40ef2a28) by [Thomas](https://huggingface.co/thomwolf)
2. [Porting the WMT19 MT model](https://huggingface.co/blog/porting-fsmt) by [Stas](https://huggingface.co/stas)

From experience, we can tell you that the most important things to keep in mind when adding a model are:

- Don't reinvent the wheel! Most parts of the code you will add for the new 🤗 Transformers model already exist
  somewhere in 🤗 Transformers. Take some time to find similar, already existing models and tokenizers you can copy
  from. [grep](https://www.gnu.org/software/grep/) and [rg](https://github.com/BurntSushi/ripgrep) are your friends.
  Note that it might very well happen that your model's tokenizer is based on one model implementation, and your
  model's modeling code on another one. *E.g.*, FSMT's modeling code is based on BART, while FSMT's tokenizer code is
  based on XLM.
- It's more of an engineering challenge than a scientific one.
  You should spend more time on creating an efficient debugging environment rather than trying to understand all the
  theoretical aspects of the model in the paper.
- Ask for help when you're stuck! Models are the core component of 🤗 Transformers, so we at Hugging Face are more than
  happy to help you at every step of adding your model. Don't hesitate to ask if you notice you are not making
  progress.

In the following, we try to give you a general recipe that we found most useful when porting a model to 🤗 Transformers.

The following list is a summary of everything that has to be done to add a model and can be used by you as a To-Do
list:

☐ (Optional) Understood the model's theoretical aspects<br>
☐ Prepared the 🤗 Transformers dev environment<br>
☐ Set up a debugging environment of the original repository<br>
☐ Created a script that successfully runs the `forward()` pass using the original repository and checkpoint<br>
☐ Successfully added the model skeleton to 🤗 Transformers<br>
☐ Successfully converted the original checkpoint to the 🤗 Transformers checkpoint<br>
☐ Successfully ran the `forward()` pass in 🤗 Transformers that gives an identical output to the original checkpoint<br>
☐ Finished the model tests in 🤗 Transformers<br>
☐ Successfully added the tokenizer in 🤗 Transformers<br>
☐ Ran end-to-end integration tests<br>
☐ Finished the docs<br>
☐ Uploaded the model weights to the Hub<br>
☐ Submitted the pull request<br>
☐ (Optional) Added a demo notebook

To begin with, we usually recommend starting by getting a good theoretical understanding of `BrandNewBert`. However, if
you prefer to understand the theoretical aspects of the model *on-the-job*, then it is totally fine to directly dive
into `BrandNewBert`'s code base. This option might suit you better if your engineering skills are better than your
theoretical skills, if you have trouble understanding `BrandNewBert`'s paper, or if you just enjoy programming much
more than reading scientific papers.

### 1. (Optional) Theoretical aspects of BrandNewBert

You should take some time to read *BrandNewBert's* paper, if such descriptive work exists. There might be large
sections of the paper that are difficult to understand. If this is the case, that's fine - don't worry! The goal is not
to get a deep theoretical understanding of the paper, but to extract the necessary information required to effectively
re-implement the model in 🤗 Transformers. That being said, you don't have to spend too much time on the theoretical
aspects, but rather focus on the practical ones, namely:

- What type of model is *brand_new_bert*? A BERT-like encoder-only model? A GPT2-like decoder-only model? A BART-like
  encoder-decoder model? Look at the [model_summary](model_summary) if you're not familiar with the differences between
  those.
- What are the applications of *brand_new_bert*? Text classification? Text generation? Seq2Seq tasks, *e.g.,*
  summarization?
- What is the novel feature of the model that makes it different from BERT/GPT-2/BART?
- Which of the already existing [🤗 Transformers models](https://huggingface.co/transformers/#contents) is most similar
  to *brand_new_bert*?
- What type of tokenizer is used? A sentencepiece tokenizer? A word-piece tokenizer? Is it the same tokenizer as used
  for BERT or BART?

After you feel like you have gotten a good overview of the architecture of the model, you can write to the Hugging Face
team with any questions you might have. This might include questions regarding the model's architecture, its attention
layer, etc. We will be more than happy to help you.

### 2. Next prepare your environment

1. Fork the [repository](https://github.com/huggingface/transformers) by clicking on the 'Fork' button on the
repository's page. This creates a copy of the code under your GitHub user account.

2. Clone your `transformers` fork to your local disk, and add the base repository as a remote:

```bash
git clone https://github.com/[your Github handle]/transformers.git
cd transformers
git remote add upstream https://github.com/huggingface/transformers.git
```

3. Set up a development environment, for instance by running the following command:

```bash
python -m venv .env
source .env/bin/activate
pip install -e ".[dev]"
```

Depending on your OS, and since the number of optional dependencies of Transformers is growing, you might get a failure
with this command. If that's the case make sure to install the Deep Learning framework you are working with (PyTorch,
TensorFlow and/or Flax) and then do:

```bash
pip install -e ".[quality]"
```

which should be enough for most use cases. You can then return to the parent directory

```bash
cd ..
```

4. We recommend adding the PyTorch version of *brand_new_bert* to Transformers. To install PyTorch, please follow the
instructions on https://pytorch.org/get-started/locally/.

**Note:** You don't need to have CUDA installed. Making the new model work on CPU is sufficient.

5. To port *brand_new_bert*, you will also need access to its original repository:

```bash
git clone https://github.com/org_that_created_brand_new_bert_org/brand_new_bert.git
cd brand_new_bert
pip install -e .
```

Now you have set up a development environment to port *brand_new_bert* to 🤗 Transformers.

### 3.-4. Run a pretrained checkpoint using the original repository

At first, you will work on the original *brand_new_bert* repository. Often, the original implementation is very
"researchy": documentation might be lacking and the code can be difficult to understand. But this should be exactly
your motivation to reimplement *brand_new_bert*. At Hugging Face, one of our main goals is to *make people stand on the
shoulders of giants*, which translates here very well into taking a working model and rewriting it to make it as
**accessible, user-friendly, and beautiful** as possible.
This is the number-one motivation to re-implement models into 🤗 Transformers - trying to make complex new NLP
technology accessible to **everybody**.

You should start by diving into the original repository.

Successfully running the official pretrained model in the original repository is often **the most difficult** step.
From our experience, it is very important to spend some time getting familiar with the original code base. You need to
figure out the following:

- Where to find the pretrained weights?
- How to load the pretrained weights into the corresponding model?
- How to run the tokenizer independently from the model?
- Trace one forward pass so that you know which classes and functions are required for a simple forward pass. Usually,
  you only have to reimplement those functions.
- Be able to locate the important components of the model: Where can the model's class be found? Are there model
  sub-classes, *e.g.*, EncoderModel, DecoderModel? Where can the self-attention layer be found? Are there multiple
  different attention layers, *e.g.*, *self-attention*, *cross-attention*...?
- How can you debug the model in the original environment of the repo? Do you have to add *print* statements, can you
  work with an interactive debugger like *ipdb*, or should you use an efficient IDE to debug the model, like PyCharm?

It is very important that before you start the porting process, you can **efficiently** debug code in the original
repository! Also, remember that you are working with an open-source library, so do not hesitate to open an issue, or
even a pull request, in the original repository. The maintainers of that repository are most likely very happy about
someone looking into their code!

At this point, it is really up to you which debugging environment and strategy you prefer to use to debug the original
model. We strongly advise against setting up a costly GPU environment; instead, simply work on a CPU, both when diving
into the original repository and when starting to write the 🤗 Transformers implementation of the model. Only at the
very end, when the model has already been successfully ported to 🤗 Transformers, should one verify that the model also
works as expected on GPU.

In general, there are two possible debugging environments for running the original model:

- [Jupyter notebooks](https://jupyter.org/) / [google colab](https://colab.research.google.com/notebooks/intro.ipynb)
- Local Python scripts.

Jupyter notebooks have the advantage that they allow for cell-by-cell execution, which can be helpful to better split
logical components from one another and to have faster debugging cycles, as intermediate results can be stored. Also,
notebooks are often easier to share with other contributors, which might be very helpful if you want to ask the Hugging
Face team for help. If you are familiar with Jupyter notebooks, we strongly recommend working with them.
The obvious disadvantage of Jupyter notebooks is that if you are not used to working with them, you will have to spend
some time adjusting to the new programming environment and you might not be able to use your known debugging tools
anymore, like `ipdb`.

For each code base, a good first step is always to load a **small** pretrained checkpoint and to be able to reproduce a
single forward pass using a dummy integer vector of input IDs as an input. Such a script could look like this (in
pseudocode):

```python
model = BrandNewBertModel.load_pretrained_checkpoint("/path/to/checkpoint/")
input_ids = [0, 4, 5, 2, 3, 7, 9]  # vector of input ids
original_output = model.predict(input_ids)
```

Next, regarding the debugging strategy, there are generally a few from which to choose:

- Decompose the original model into many small testable components and run a forward pass on each of those for
  verification
- Decompose the original model only into the original *tokenizer* and the original *model*, run a forward pass on
  those, and use intermediate print statements or breakpoints for verification

Again, it is up to you which strategy to choose. Often, one or the other is advantageous depending on the original code
base.

If the original code base allows you to decompose the model into smaller sub-components, *e.g.* if it can easily be run
in eager mode, it is usually worth the effort to do so. There are some important advantages to taking the more
difficult road in the beginning:

- at a later stage, when comparing the original model to the Hugging Face implementation, you can verify automatically
  for each component individually that the corresponding component of the 🤗 Transformers implementation matches,
  instead of relying on visual comparison via print statements
- it lets you decompose the big problem of porting a model into smaller problems of just porting individual components
  and thus structure your work better
- separating the model into logically meaningful components helps you to get a better overview of the model's design
  and thus to better understand the model
- at a later stage, those component-by-component tests help you to ensure that no regressions occur as you continue
  changing your code

[Lysandre's](https://gist.github.com/LysandreJik/db4c948f6b4483960de5cbac598ad4ed) integration checks for ELECTRA give a
nice example of how this can be done.

However, if the original code base is very complex or only allows intermediate components to be run in a compiled mode,
it might be too time-consuming or even impossible to decompose the model into smaller testable sub-components. A good
example is the [T5's MeshTensorFlow](https://github.com/tensorflow/mesh/tree/master/mesh_tensorflow) library, which is
very complex and does not offer a simple way to decompose the model into its sub-components. For such libraries, one
often relies on verifying print statements.
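If the original implementation happens to be a PyTorch `nn.Module` running in eager mode, one lightweight middle ground
between full decomposition and plain print statements is to register forward hooks that record intermediate outputs.
The sketch below is only an illustration: `DummyOriginalModel` and its attribute names (`embeddings`, `layers`) are
stand-ins, and in practice you would attach the hooks to the sub-modules of the real original model, whose names will
differ:

```python
import torch
from torch import nn

# Stand-in for the original model - in practice you would load the real checkpoint here
class DummyOriginalModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.embeddings = nn.Embedding(32, 16)
        self.layers = nn.ModuleList(nn.Linear(16, 16) for _ in range(2))

    def forward(self, input_ids):
        hidden = self.embeddings(input_ids)
        for layer in self.layers:
            hidden = layer(hidden)
        return hidden

original_model = DummyOriginalModel().eval()
captured = {}

def make_hook(name):
    # Record a detached copy of every sub-module output for later comparison
    def hook(module, inputs, output):
        captured[name] = output.detach()
    return hook

original_model.embeddings.register_forward_hook(make_hook("embeddings"))
for i, layer in enumerate(original_model.layers):
    layer.register_forward_hook(make_hook(f"layer_{i}"))

with torch.no_grad():
    original_model(torch.tensor([[0, 4, 5, 2, 3, 7, 9]]))

print({name: tuple(t.shape) for name, t in captured.items()})
```

Stored this way, the intermediate tensors can later be compared one by one against the corresponding outputs of your 🤗
Transformers implementation.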
No matter which strategy you choose, the recommended procedure is often the same: you should start to debug the
starting layers first and the ending layers last.

It is recommended that you retrieve the outputs of the following layers, either via print statements or sub-component
functions, in the following order:

1. Retrieve the input IDs passed to the model
2. Retrieve the word embeddings
3. Retrieve the input of the first Transformer layer
4. Retrieve the output of the first Transformer layer
5. Retrieve the output of the following n - 1 Transformer layers
6. Retrieve the output of the whole BrandNewBert model

The input IDs should thereby consist of an array of integers, *e.g.* `input_ids = [0, 4, 4, 3, 2, 4, 1, 7, 19]`

The outputs of the following layers often consist of multi-dimensional float arrays and can look like this:

```
[[
 [-0.1465, -0.6501,  0.1993,  ...,  0.1451,  0.3430,  0.6024],
 [-0.4417, -0.5920,  0.3450,  ..., -0.3062,  0.6182,  0.7132],
 [-0.5009, -0.7122,  0.4548,  ..., -0.3662,  0.6091,  0.7648],
 ...,
 [-0.5613, -0.6332,  0.4324,  ..., -0.3792,  0.7372,  0.9288],
 [-0.5416, -0.6345,  0.4180,  ..., -0.3564,  0.6992,  0.9191],
 [-0.5334, -0.6403,  0.4271,  ..., -0.3339,  0.6533,  0.8694]]],
```

We expect that every model added to 🤗 Transformers passes a couple of integration tests, meaning that the original
model and the reimplemented version in 🤗 Transformers have to give the exact same output up to a precision of 0.001!
Since it is normal that the exact same model written in different libraries can give a slightly different output
depending on the library framework, we accept an error tolerance of 1e-3 (0.001). It is not enough if the model gives
nearly the same output; it has to be almost identical. Therefore, you will certainly compare the intermediate outputs
of the 🤗 Transformers version multiple times against the intermediate outputs of the original implementation of
*brand_new_bert*, in which case an **efficient** debugging environment of the original repository is absolutely
important. Here is some advice to make your debugging environment as efficient as possible.

- Find the best way of debugging intermediate results. Is the original repository written in PyTorch? Then you should
  probably take the time to write a longer script that decomposes the original model into smaller sub-components to
  retrieve intermediate values. Is the original repository written in Tensorflow 1? Then you might have to rely on
  TensorFlow print operations like [tf.print](https://www.tensorflow.org/api_docs/python/tf/print) to output
  intermediate values. Is the original repository written in Jax? Then make sure that the model is **not jitted** when
  running the forward pass, *e.g.* check out [this link](https://github.com/google/jax/issues/196).
- Use the smallest pretrained checkpoint you can find. The smaller the checkpoint, the faster your debug cycle becomes.
  It is not efficient if your pretrained model is so big that your forward pass takes more than 10 seconds.
  In case only very large checkpoints are available, it might make more sense to create a dummy model in the new
  environment with randomly initialized weights and save those weights for comparison with the 🤗 Transformers version
  of your model.
- Make sure you are using the easiest way of calling a forward pass in the original repository. Ideally, you want to
  find the function in the original repository that **only** calls a single forward pass, *i.e.* a function that is
  often called `predict`, `evaluate`, `forward`, or `__call__`. You don't want to debug a function that calls `forward`
  multiple times, *e.g.* to generate text, like `autoregressive_sample` or `generate`.
- Try to separate the tokenization from the model's *forward* pass. If the original repository shows examples where you
  have to input a string, then try to find out where in the forward call the string input is changed to input ids and
  start from this point. This might mean that you have to write a small script yourself or change the original code so
  that you can directly input the ids instead of an input string.
- Make sure that the model in your debugging setup is **not** in training mode, which often causes the model to yield
  random outputs because of the multiple dropout layers in the model. Make sure that the forward pass in your debugging
  environment is **deterministic**, so that the dropout layers are not used. Or use *transformers.utils.set_seed* if
  the old and new implementations are in the same framework.

The following section gives you more specific details/tips on how you can do this for *brand_new_bert*.

### 5.-14. Port BrandNewBert to 🤗 Transformers

Next, you can finally start adding new code to 🤗 Transformers. Go into the clone of your 🤗 Transformers fork:

```bash
cd transformers
```

In the special case that you are adding a model whose architecture exactly matches the architecture of an existing
model, you only have to add a conversion script as described in [this section](#write-a-conversion-script). In this
case, you can just reuse the whole model architecture of the already existing model.

Otherwise, let's start generating a new model. You have two choices here:

- `transformers-cli add-new-model-like` to add a new model like an existing one
- `transformers-cli add-new-model` to add a new model from our template (it will look like BERT or Bart depending on
  the type of model you select)

In both cases, you will be prompted with a questionnaire to fill in the basic information of your model. The second
command requires installing `cookiecutter`; you can find more information on it
[here](https://github.com/huggingface/transformers/tree/main/templates/adding_a_new_model).
**Open a Pull Request on the main huggingface/transformers repo**

Before starting to adapt the automatically generated code, now is the time to open a "Work in progress (WIP)" pull
request, *e.g.* "[WIP] Add *brand_new_bert*", in 🤗 Transformers so that you and the Hugging Face team can work
side-by-side on integrating the model into 🤗 Transformers.

You should do the following:

1. Create a branch with a descriptive name from your main branch

```bash
git checkout -b add_brand_new_bert
```

2. Commit the automatically generated code:

```bash
git add .
git commit
```

3. Fetch and rebase to current main

```bash
git fetch upstream
git rebase upstream/main
```

4. Push the changes to your account using:

```bash
git push -u origin a-descriptive-name-for-my-changes
```

5. Once you are satisfied, go to the webpage of your fork on GitHub. Click on "Pull request". Make sure to add the
GitHub handles of some members of the Hugging Face team as reviewers, so that the Hugging Face team gets notified of
future changes.

6. Change the PR into a draft by clicking on "Convert to draft" on the right of the GitHub pull request web page.

In the following, whenever you have made some progress, don't forget to commit your work and push it to your account so
that it shows in the pull request. Additionally, you should make sure to update your work with the current main from
time to time by doing:

```bash
git fetch upstream
git merge upstream/main
```

In general, all questions you might have regarding the model or your implementation should be asked in your PR and
discussed/solved there. This way, the Hugging Face team will always be notified when you are committing new code or if
you have a question. It is often very helpful to point the Hugging Face team to your added code so that the team can
efficiently understand your problem or question.

To do so, you can go to the "Files changed" tab where you see all of your changes, go to a line regarding which you
want to ask a question, and click on the "+" symbol to add a comment. Whenever a question or problem has been solved,
you can click on the "Resolve" button of the created comment.

In the same way, the Hugging Face team will open comments when reviewing your code. We recommend asking most questions
on GitHub in your PR. For some very general questions that are not very useful for the public, feel free to ping the
Hugging Face team via Slack or email.

**5. Adapt the generated models code for brand_new_bert**

At first, we will focus only on the model itself and not care about the tokenizer. All the relevant code should be
found in the generated files `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` and
`src/transformers/models/brand_new_bert/configuration_brand_new_bert.py`.

Now you can finally start coding :).
The generated code in `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` will either have the same
architecture as BERT if it's an encoder-only model, or BART if it's an encoder-decoder model. At this point, you should
remind yourself what you learned in the beginning about the theoretical aspects of the model: *How is the model
different from BERT or BART?* Implement those changes, which often means changing the *self-attention* layer, the order
of the normalization layer, etc... Again, it is often useful to look at the similar architecture of already existing
models in Transformers to get a better feeling of how your model should be implemented.

**Note** that at this point, you don't have to be very sure that your code is fully correct or clean. Rather, it is
advised to add a first *unclean*, copy-pasted version of the original code to
`src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` until you feel like all the necessary code has been
added. From our experience, it is much more efficient to quickly add a first version of the required code and
improve/correct the code iteratively with the conversion script as described in the next section. The only thing that
has to work at this point is that you can instantiate the 🤗 Transformers implementation of *brand_new_bert*, *i.e.* the
following command should work:

```python
from transformers import BrandNewBertModel, BrandNewBertConfig

model = BrandNewBertModel(BrandNewBertConfig())
```

The above command will create a model according to the default parameters as defined in `BrandNewBertConfig()` with
random weights, thus making sure that the `init()` methods of all components work.

Note that all random initialization should happen in the `_init_weights` method of your `BrandnewBertPreTrainedModel`
class. It should initialize all leaf modules depending on the variables of the config. Here is an example with the BERT
`_init_weights` method:

```py
def _init_weights(self, module):
    """Initialize the weights"""
    if isinstance(module, nn.Linear):
        module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        if module.bias is not None:
            module.bias.data.zero_()
    elif isinstance(module, nn.Embedding):
        module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        if module.padding_idx is not None:
            module.weight.data[module.padding_idx].zero_()
    elif isinstance(module, nn.LayerNorm):
        module.bias.data.zero_()
        module.weight.data.fill_(1.0)
```

You can have some more custom schemes if you need a special initialization for some modules. For instance, in
`Wav2Vec2ForPreTraining`, the last two linear layers need to have the initialization of the regular PyTorch
`nn.Linear`, but all the other ones should use an initialization as above.
This is coded as follows:

```py
def _init_weights(self, module):
    """Initialize the weights"""
    if isinstance(module, Wav2Vec2ForPreTraining):
        module.project_hid.reset_parameters()
        module.project_q.reset_parameters()
        module.project_hid._is_hf_initialized = True
        module.project_q._is_hf_initialized = True
    elif isinstance(module, nn.Linear):
        module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        if module.bias is not None:
            module.bias.data.zero_()
```

The `_is_hf_initialized` flag is used internally to make sure we only initialize a submodule once. By setting it to `True` for `module.project_q` and `module.project_hid`, we make sure the custom initialization we did is not overridden later on, i.e. the `_init_weights` function won't be applied to them.

**6. Write a conversion script**

Next, you should write a conversion script that lets you convert the checkpoint you used to debug *brand_new_bert* in the original repository to a checkpoint compatible with your just-created 🤗 Transformers implementation of *brand_new_bert*. It is not advised to write the conversion script from scratch; rather, look through the already existing conversion scripts in 🤗 Transformers for one that has been used to convert a similar model written in the same framework as *brand_new_bert*. Usually, it is enough to copy an already existing conversion script and slightly adapt it to your use case. Don't hesitate to ask the Hugging Face team to point you to a similar, already existing conversion script for your model.

- If you are porting a model from TensorFlow to PyTorch, a good starting point is BERT's conversion script [here](https://github.com/huggingface/transformers/blob/7acfa95afb8194f8f9c1f4d2c6028224dbed35a2/src/transformers/models/bert/modeling_bert.py#L91)
- If you are porting a model from PyTorch to PyTorch, a good starting point is BART's conversion script [here](https://github.com/huggingface/transformers/blob/main/src/transformers/models/bart/convert_bart_original_pytorch_checkpoint_to_pytorch.py)

In the following, we will quickly explain how PyTorch models store layer weights and define layer names. In PyTorch, the name of a layer is defined by the name of the class attribute you give the layer. Let's define a dummy model in PyTorch, called `SimpleModel`, as follows:

```python
from torch import nn


class SimpleModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.dense = nn.Linear(10, 10)
        self.intermediate = nn.Linear(10, 10)
        self.layer_norm = nn.LayerNorm(10)
```

Now we can create an instance of this model definition, which fills all weights: `dense`, `intermediate`, `layer_norm` with random values. We can print the model to see its architecture

```python
model = SimpleModel()

print(model)
```

This will print out the following:

```
SimpleModel(
  (dense): Linear(in_features=10, out_features=10, bias=True)
  (intermediate): Linear(in_features=10, out_features=10, bias=True)
  (layer_norm): LayerNorm((10,), eps=1e-05, elementwise_affine=True)
)
```

We can see that the layer names are defined by the name of the class attribute in PyTorch.
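As a small illustrative aside (not part of the original guide), you can also list every weight name and shape in one go, which is handy later when matching 🤗 Transformers parameter names against the checkpoint keys:

```python
for name, param in model.named_parameters():
    print(name, tuple(param.shape))
# dense.weight (10, 10)
# dense.bias (10,)
# intermediate.weight (10, 10)
# ...
```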
You can print out the weight values of a specific layer:

```python
print(model.dense.weight.data)
```

to see that the weights were randomly initialized

```
tensor([[-0.0818,  0.2207, -0.0749, -0.0030,  0.0045, -0.1569, -0.1598,  0.0212,
         -0.2077,  0.2157],
        [ 0.1044,  0.0201,  0.0990,  0.2482,  0.3116,  0.2509,  0.2866, -0.2190,
          0.2166, -0.0212],
        [-0.2000,  0.1107, -0.1999, -0.3119,  0.1559,  0.0993,  0.1776, -0.1950,
         -0.1023, -0.0447],
        [-0.0888, -0.1092,  0.2281,  0.0336,  0.1817, -0.0115,  0.2096,  0.1415,
         -0.1876, -0.2467],
        [ 0.2208, -0.2352, -0.1426, -0.2636, -0.2889, -0.2061, -0.2849, -0.0465,
          0.2577,  0.0402],
        [ 0.1502,  0.2465,  0.2566,  0.0693,  0.2352, -0.0530,  0.1859, -0.0604,
          0.2132,  0.1680],
        [ 0.1733, -0.2407, -0.1721,  0.1484,  0.0358, -0.0633, -0.0721, -0.0090,
          0.2707, -0.2509],
        [-0.1173,  0.1561,  0.2945,  0.0595, -0.1996,  0.2988, -0.0802,  0.0407,
          0.1829, -0.1568],
        [-0.1164, -0.2228, -0.0403,  0.0428,  0.1339,  0.0047,  0.1967,  0.2923,
          0.0333, -0.0536],
        [-0.1492, -0.1616,  0.1057,  0.1950, -0.2807, -0.2710, -0.1586,  0.0739,
          0.2220,  0.2358]]).
```

In the conversion script, you should fill those randomly initialized weights with the exact weights of the corresponding layer in the checkpoint. *E.g.*

```python
# retrieve matching layer weights, e.g. by
# recursive algorithm
layer_name = "dense"
pretrained_weight = array_of_dense_layer

model_pointer = getattr(model, "dense")

model_pointer.weight.data = torch.from_numpy(pretrained_weight)
```

While doing so, you must verify that each randomly initialized weight of your PyTorch model and its corresponding checkpoint weight exactly match in both **shape and name**. To do so, it is **necessary** to add assert statements for the shape and to print out the names of the checkpoint weights. E.g. you should add statements like:

```python
assert (
    model_pointer.weight.shape == pretrained_weight.shape
), f"Pointer shape of random weight {model_pointer.shape} and array shape of checkpoint weight {pretrained_weight.shape} mismatched"
```

Besides that, you should also print out the names of both weights to make sure they match, *e.g.*

```python
logger.info(f"Initialize PyTorch weight {layer_name} from {pretrained_weight.name}")
```

If either the shape or the name doesn't match, you probably assigned the wrong checkpoint weight to a randomly initialized layer of the 🤗 Transformers implementation.

An incorrect shape is most likely due to an incorrect setting of the config parameters in `BrandNewBertConfig()` that does not exactly match those used for the checkpoint you want to convert. However, it could also be that PyTorch's implementation of a layer requires the weight to be transposed beforehand.

Finally, you should also check that **all** required weights are initialized and print out all checkpoint weights that were not used for initialization, to make sure the model is correctly converted. It is completely normal that conversion attempts fail with either a wrong shape statement or a wrong name assignment.
This is most likely because either you used incorrect parameters in `BrandNewBertConfig()`, have a wrong architecture in the 🤗 Transformers implementation, have a bug in the `init()` functions of one of the components of the 🤗 Transformers implementation, or you need to transpose one of the checkpoint weights.

This step should be iterated together with the previous step until all weights of the checkpoint are correctly loaded into the Transformers model. Having correctly loaded the checkpoint into the 🤗 Transformers implementation, you can then save the model under a folder of your choice `/path/to/converted/checkpoint/folder`, which should then contain both a `pytorch_model.bin` file and a `config.json` file:

```python
model.save_pretrained("/path/to/converted/checkpoint/folder")
```

**7. Implement the forward pass**

Having managed to correctly load the trained weights into the 🤗 Transformers implementation, you should now make sure that the forward pass is correctly implemented. In [Get familiar with the original repository](#3-4-führen-sie-einen-pre-training-checkpoint-mit-dem-original-repository-durch), you already created a script that runs a forward pass of the model using the original repository. Now you should write an analogous script that uses the 🤗 Transformers implementation instead of the original one. It should look as follows:

```python
model = BrandNewBertModel.from_pretrained("/path/to/converted/checkpoint/folder")
input_ids = [0, 4, 4, 3, 2, 4, 1, 7, 19]
output = model(input_ids).last_hidden_states
```

It is very likely that the 🤗 Transformers implementation and the original model implementation don't give the exact same output the very first time, or that the forward pass throws an error. Don't be disappointed - it's expected! First, you should make sure that the forward pass doesn't throw any errors. It often happens that the wrong dimensions are used, leading to a *Dimensionality mismatch* error, or that the wrong data type is used, *e.g.* `torch.long` instead of `torch.float32`. Don't hesitate to ask the Hugging Face team for help if you cannot solve certain errors.

To make sure the 🤗 Transformers implementation works correctly, you need to verify that the outputs are equivalent to a precision of `1e-3`. First, you should make sure that the output shapes are identical, *i.e.* *outputs.shape* should yield the same value for the script of the 🤗 Transformers implementation and the original implementation. Next, you should make sure that the output values are identical as well. This is one of the most difficult parts of adding a new model. Common mistakes why the outputs are not identical are:

- Some layers were not added, *i.e.* an *activation* layer was not added, or the residual connection was forgotten
- The word embedding matrix was not tied
- The wrong positional embeddings are used because the original implementation uses an offset
- Dropout is applied during the forward pass.
To fix this, make sure *model.training is False* and that no dropout layer is falsely activated during the forward pass, *i.e.* pass *self.training* to [PyTorch's functional dropout](https://pytorch.org/docs/stable/nn.functional.html?highlight=dropout#torch.nn.functional.dropout)

The best way to fix the issue is usually to look at the forward pass of the original implementation and the 🤗 Transformers implementation side-by-side and check whether there are any differences. Ideally, you should debug/print out intermediate outputs of both implementations of the forward pass to find the exact position in the network where the 🤗 Transformers implementation shows a different output than the original implementation. First, make sure that the hardcoded `input_ids` in both scripts are identical. Next, verify that the outputs of the first transformation of the `input_ids` (usually the word embeddings) are identical. Then work your way up to the very last layer of the network. At some point, you will notice a difference between the two implementations, which points you to the bug in the 🤗 Transformers implementation.

From our experience, a simple and efficient approach is to add many print statements at the same positions in the network in both the original implementation and the 🤗 Transformers implementation, and to successively remove the print statements that show the same values for intermediate representations.

When you're confident that both implementations yield the same output, verify the outputs with `torch.allclose(original_output, output, atol=1e-3)`, and you've passed the most difficult part! Congratulations - the work left to be done should be a cakewalk 😊.

**8. Adding all necessary model tests**

At this point, you have successfully added a new model. However, it is very much possible that the model does not yet fully comply with the required design. To make sure the implementation is fully compatible with 🤗 Transformers, all common tests should pass. The Cookiecutter should have automatically added a test file for your model, probably under `tests/models/brand_new_bert/test_modeling_brand_new_bert.py`. Run this test file to verify that all common tests pass:

```bash
pytest tests/models/brand_new_bert/test_modeling_brand_new_bert.py
```

Having fixed all common tests, it is now crucial to ensure that all the nice work you have done is well tested, so that

- a) the community can easily understand your work by looking at the specific tests of *brand_new_bert*
- b) future changes to your model will not break any important features of the model.

First, integration tests should be added. Those integration tests essentially do the same as the debugging scripts you used earlier to implement the model in 🤗 Transformers. A template for those model tests has already been added by the Cookiecutter, called `BrandNewBertModelIntegrationTests`, and only has to be filled out by you.
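Filled in, such an integration test might look roughly like the following sketch (the checkpoint name, expected shape, and expected values are placeholders you need to replace with real values obtained from the original implementation):

```python
import unittest

import torch

from transformers import BrandNewBertModel
from transformers.testing_utils import require_torch, slow


@require_torch
class BrandNewBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        # placeholder checkpoint name - replace with the converted checkpoint you uploaded
        model = BrandNewBertModel.from_pretrained("username/brand_new_bert")
        input_ids = torch.tensor([[0, 4, 4, 3, 2, 4, 1, 7, 19]])

        with torch.no_grad():
            output = model(input_ids).last_hidden_state

        # placeholder shape and values - fill these in from a forward pass of the original model
        self.assertEqual(output.shape, torch.Size((1, 9, 768)))
        expected_slice = torch.tensor([[[0.0000, 0.0000, 0.0000]]])
        self.assertTrue(torch.allclose(output[:, :1, :3], expected_slice, atol=1e-3))
```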
To make sure those tests pass, run

```bash
RUN_SLOW=1 pytest -sv tests/models/brand_new_bert/test_modeling_brand_new_bert.py::BrandNewBertModelIntegrationTests
```

<Tip>

In case you are using Windows, you should replace `RUN_SLOW=1` with `SET RUN_SLOW=1`.

</Tip>

Second, all features that are special to *brand_new_bert* should additionally be tested in a separate test under `BrandNewBertModelTester`/`BrandNewBertModelTest`. This part is often forgotten but is extremely useful in two ways:

- It helps to transfer the knowledge you acquired during the model addition to the community by showing how the special features of *brand_new_bert* should work.
- Future contributors can quickly test changes to the model by running those special tests.

**9. Implement the tokenizer**

Next, we should add the tokenizer of *brand_new_bert*. Usually, the tokenizer is equivalent to or very similar to an already existing tokenizer of 🤗 Transformers. It is very important to find/extract the original tokenizer file and to manage to load this file into the 🤗 Transformers implementation of the tokenizer.

To ensure that the tokenizer works correctly, it is recommended to first create a script in the original repository that inputs a string and returns the `input_ids`. It could look similar to this (in pseudo-code):

```python
input_str = "This is a long example input string containing special characters .$?-, numbers 2872 234 12 and words."
model = BrandNewBertModel.load_pretrained_checkpoint("/path/to/checkpoint/")
input_ids = model.tokenize(input_str)
```

You might have to take another deep look into the original repository to find the correct tokenizer function, or you might even have to make changes to your clone of the original repository so that it only outputs the `input_ids`. Having written a functional tokenization script that uses the original repository, an analogous script for 🤗 Transformers should be created. It should look similar to this:

```python
from transformers import BrandNewBertTokenizer

input_str = "This is a long example input string containing special characters .$?-, numbers 2872 234 12 and words."

tokenizer = BrandNewBertTokenizer.from_pretrained("/path/to/tokenizer/folder/")

input_ids = tokenizer(input_str).input_ids
```

When both `input_ids` yield the same values, as a final step a tokenizer test file should also be added. Analogous to the modeling test files of *brand_new_bert*, the tokenization test files of *brand_new_bert* should contain a couple of hardcoded integration tests.

**10. Run end-to-end integration tests**

Having added the tokenizer, you should also add a few end-to-end integration tests that use both the model and the tokenizer to `tests/models/brand_new_bert/test_modeling_brand_new_bert.py` in 🤗 Transformers. Such a test should show on a meaningful text-to-text example that the 🤗 Transformers implementation works as expected. A meaningful text-to-text example can be, *e.g.*, a source-to-target translation pair, an article-to-summary pair, a question-to-answer pair, etc…
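Purely as an illustration (the class names, checkpoint path, and generation parameters below are placeholders, not part of the official template), such an end-to-end check could look like:

```python
from transformers import BrandNewBertForConditionalGeneration, BrandNewBertTokenizer

# placeholder checkpoint - use the converted checkpoint you validated earlier
model = BrandNewBertForConditionalGeneration.from_pretrained("/path/to/converted/checkpoint/folder")
tokenizer = BrandNewBertTokenizer.from_pretrained("/path/to/converted/checkpoint/folder")

article = "Studies have shown that owning a dog is good for you."
inputs = tokenizer(article, return_tensors="pt")

# compare the generated text against the output of the original implementation
summary_ids = model.generate(**inputs, num_beams=4, max_length=20)
print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True))
```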
If none of the ported checkpoints has been fine-tuned on a downstream task, it is enough to simply rely on the model tests. In a final step, to ensure that the model is fully functional, you should also run all tests on GPU. It can happen that you forgot to add some `.to(self.device)` statements to internal tensors of the model, which would lead to an error in such a test. In case you have no access to a GPU, the Hugging Face team can take care of running those tests for you.

**11. Add Docstring**

Now, all the necessary functionality for *brand_new_bert* is added - you're almost done! The only thing left to add is a nice docstring and a doc page. The Cookiecutter should have added a template file called `docs/source/model_doc/brand_new_bert.md` that you should fill out. Users of your model will usually look at this page first before using your model. Hence, the documentation must be understandable and concise. It is very useful for the community to add some *Tips* to show how the model should be used. Don't hesitate to ping the Hugging Face team regarding the docstrings.

Next, make sure that the docstring added to `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` is correct and includes all necessary inputs and outputs. We have a detailed guide about writing documentation and our docstring format [here](writing-documentation). It is always good to remind oneself that documentation should be treated at least as carefully as the code in 🤗 Transformers, since the documentation is usually the first contact point of the community with the model.

**Code refactor**

Great, now you have added all the necessary code for *brand_new_bert*. At this point, you should correct some potential incorrect code style by running:

```bash
make style
```

and verify that your coding style passes the quality check:

```bash
make quality
```

There are a couple of other very strict design tests in 🤗 Transformers that might still be failing, which shows up in the tests of your pull request. This is often because of some missing information in the docstring or some incorrect naming. The Hugging Face team will surely help you if you're stuck here.

Lastly, it is always a good idea to refactor one's code after having ensured that it works correctly. With all tests passing, now is a good time to go over the added code again and do some refactoring.

You have now finished the coding part, congratulations! 🎉 You are awesome! 😎

**12. Upload the models to the model hub**

In this final part, you should convert and upload all checkpoints to the model hub and add a model card for each uploaded model checkpoint. You can get familiar with the hub functionalities by reading our [Model sharing and uploading Page](model_sharing).
Here you should work hand-in-hand with the Hugging Face team to decide on a fitting name for each checkpoint and to get the required access rights to be able to upload the model under the organization of the author of *brand_new_bert*. The `push_to_hub` method, present in all models in `transformers`, is a quick and efficient way to push your checkpoint to the hub. A little snippet is pasted below:

```python
brand_new_bert.push_to_hub("brand_new_bert")
# Uncomment the following line to push to an organization.
# brand_new_bert.push_to_hub("<organization>/brand_new_bert")
```

It is worth spending some time to create fitting model cards for each checkpoint. The model cards should highlight the specific characteristics of the particular checkpoint, *e.g.* on which dataset was the checkpoint pretrained/fine-tuned? For which downstream task should the model be used? Also include some code on how to use the model correctly.

**13. (Optional) Add notebook**

It is very helpful to add a notebook that showcases in detail how *brand_new_bert* can be used for inference and/or fine-tuned on a downstream task. This is not mandatory to merge your PR, but very useful for the community.

**14. Submit your finished PR**

You're done programming now and can move to the last step, which is getting your PR merged into main. Usually, the Hugging Face team will have helped you already at this point, but it is worth taking some time to give your finished PR a nice description and eventually add comments to your code if you want to point your reviewer to certain design choices.

### Share your work!!

Now, it's time to get some credit from the community for your work! Completing a model addition is a major contribution to Transformers and the whole NLP community. Your code and the ported pre-trained models will certainly be used by hundreds and possibly even thousands of developers and researchers. You should be proud of your work and share your achievement with the community.

**You made another model that is super easy to access for everyone in the community! 🤯**
transformers/docs/source/de/add_new_model.md/0
{ "file_path": "transformers/docs/source/de/add_new_model.md", "repo_id": "transformers", "token_count": 24185 }
246
<!--- Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Contribute to 🤗 Transformers Everyone is welcome to contribute, and we value everybody's contribution. Code contributions are not the only way to help the community. Answering questions, helping others, and improving the documentation are also immensely valuable. It also helps us if you spread the word! Reference the library in blog posts about the awesome projects it made possible, shout out on Twitter every time it has helped you, or simply ⭐️ the repository to say thank you. However you choose to contribute, please be mindful and respect our [code of conduct](https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md). **This guide was heavily inspired by the awesome [scikit-learn guide to contributing](https://github.com/scikit-learn/scikit-learn/blob/main/CONTRIBUTING.md).** ## Ways to contribute There are several ways you can contribute to 🤗 Transformers: * Fix outstanding issues with the existing code. * Submit issues related to bugs or desired new features. * Implement new models. * Contribute to the examples or to the documentation. If you don't know where to start, there is a special [Good First Issue](https://github.com/huggingface/transformers/contribute) listing. It will give you a list of open issues that are beginner-friendly and help you start contributing to open-source. The best way to do that is to open a Pull Request and link it to the issue that you'd like to work on. We try to give priority to opened PRs as we can easily track the progress of the fix, and if the contributor does not have time anymore, someone else can take the PR over. For something slightly more challenging, you can also take a look at the [Good Second Issue](https://github.com/huggingface/transformers/labels/Good%20Second%20Issue) list. In general though, if you feel like you know what you're doing, go for it and we'll help you get there! 🚀 > All contributions are equally valuable to the community. 🥰 ## Fixing outstanding issues If you notice an issue with the existing code and have a fix in mind, feel free to [start contributing](#create-a-pull-request) and open a Pull Request! ## Submitting a bug-related issue or feature request Do your best to follow these guidelines when submitting a bug-related issue or a feature request. It will make it easier for us to come back to you quickly and with good feedback. ### Did you find a bug? The 🤗 Transformers library is robust and reliable thanks to users who report the problems they encounter. Before you report an issue, we would really appreciate it if you could **make sure the bug was not already reported** (use the search bar on GitHub under Issues). Your issue should also be related to bugs in the library itself, and not your code. If you're unsure whether the bug is in your code or the library, please ask in the [forum](https://discuss.huggingface.co/) first. This helps us respond quicker to fixing issues related to the library versus general questions. 
Once you've confirmed the bug hasn't already been reported, please include the following information in your issue so we can quickly resolve it:

* Your **OS type and version** and **Python**, **PyTorch** and **TensorFlow** versions when applicable.
* A short, self-contained, code snippet that allows us to reproduce the bug in less than 30s.
* The *full* traceback if an exception is raised.
* Attach any other additional information, like screenshots, you think may help.

To get the OS and software versions automatically, run the following command:

```bash
transformers-cli env
```

You can also run the same command from the root of the repository:

```bash
python src/transformers/commands/transformers_cli.py env
```

### Do you want a new feature?

If there is a new feature you'd like to see in 🤗 Transformers, please open an issue and describe:

1. What is the *motivation* behind this feature? Is it related to a problem or frustration with the library? Is it a feature related to something you need for a project? Is it something you worked on and think it could benefit the community?

   Whatever it is, we'd love to hear about it!

2. Describe your requested feature in as much detail as possible. The more you can tell us about it, the better we'll be able to help you.
3. Provide a *code snippet* that demonstrates the feature's usage.
4. If the feature is related to a paper, please include a link.

If your issue is well written we're already 80% of the way there by the time you create it.

We have added [templates](https://github.com/huggingface/transformers/tree/main/templates) to help you get started with your issue.

## Do you want to implement a new model?

New models are constantly released and if you want to implement a new model, please provide the following information:

* A short description of the model and a link to the paper.
* Link to the implementation if it is open-sourced.
* Link to the model weights if they are available.

If you are willing to contribute the model yourself, let us know so we can help you add it to 🤗 Transformers!

We have added a [detailed guide and templates](https://github.com/huggingface/transformers/tree/main/templates) to help you get started with adding a new model, and we also have a more technical guide for [how to add a model to 🤗 Transformers](https://huggingface.co/docs/transformers/add_new_model).

## Do you want to add documentation?

We're always looking for improvements to the documentation that make it more clear and accurate. Please let us know how the documentation can be improved such as typos and any content that is missing, unclear or inaccurate. We'll be happy to make the changes or help you make a contribution if you're interested!

For more details about how to generate, build, and write the documentation, take a look at the documentation [README](https://github.com/huggingface/transformers/tree/main/docs).

## Create a Pull Request

Before writing any code, we strongly advise you to search through the existing PRs or issues to make sure nobody is already working on the same thing. If you are unsure, it is always a good idea to open an issue to get some feedback.

You will need basic `git` proficiency to contribute to 🤗 Transformers. While `git` is not the easiest tool to use, it has the greatest manual. Type `git --help` in a shell and enjoy! If you prefer books, [Pro Git](https://git-scm.com/book/en/v2) is a very good reference.
You'll need **[Python 3.8](https://github.com/huggingface/transformers/blob/main/setup.py#L426)** or above to contribute to 🤗 Transformers. Follow the steps below to start contributing: 1. Fork the [repository](https://github.com/huggingface/transformers) by clicking on the **[Fork](https://github.com/huggingface/transformers/fork)** button on the repository's page. This creates a copy of the code under your GitHub user account. 2. Clone your fork to your local disk, and add the base repository as a remote: ```bash git clone [email protected]:<your Github handle>/transformers.git cd transformers git remote add upstream https://github.com/huggingface/transformers.git ``` 3. Create a new branch to hold your development changes: ```bash git checkout -b a-descriptive-name-for-my-changes ``` 🚨 **Do not** work on the `main` branch! 4. Set up a development environment by running the following command in a virtual environment: ```bash pip install -e ".[dev]" ``` If 🤗 Transformers was already installed in the virtual environment, remove it with `pip uninstall transformers` before reinstalling it in editable mode with the `-e` flag. Depending on your OS, and since the number of optional dependencies of Transformers is growing, you might get a failure with this command. If that's the case make sure to install the Deep Learning framework you are working with (PyTorch, TensorFlow and/or Flax) then do: ```bash pip install -e ".[quality]" ``` which should be enough for most use cases. 5. Develop the features in your branch. As you work on your code, you should make sure the test suite passes. Run the tests impacted by your changes like this: ```bash pytest tests/<TEST_TO_RUN>.py ``` For more information about tests, check out the [Testing](https://huggingface.co/docs/transformers/testing) guide. 🤗 Transformers relies on `black` and `ruff` to format its source code consistently. After you make changes, apply automatic style corrections and code verifications that can't be automated in one go with: ```bash make fixup ``` This target is also optimized to only work with files modified by the PR you're working on. If you prefer to run the checks one after the other, the following command applies the style corrections: ```bash make style ``` 🤗 Transformers also uses `ruff` and a few custom scripts to check for coding mistakes. Quality controls are run by the CI, but you can run the same checks with: ```bash make quality ``` Finally, we have a lot of scripts to make sure we don't forget to update some files when adding a new model. You can run these scripts with: ```bash make repo-consistency ``` To learn more about those checks and how to fix any issues with them, check out the [Checks on a Pull Request](https://huggingface.co/docs/transformers/pr_checks) guide. If you're modifying documents under the `docs/source` directory, make sure the documentation can still be built. This check will also run in the CI when you open a pull request. To run a local check make sure you install the documentation builder: ```bash pip install ".[docs]" ``` Run the following command from the root of the repository: ```bash doc-builder build transformers docs/source/en --build_dir ~/tmp/test-build ``` This will build the documentation in the `~/tmp/test-build` folder where you can inspect the generated Markdown files with your favorite editor. You can also preview the docs on GitHub when you open a pull request. 
Once you're happy with your changes, add the changed files with `git add` and record your changes locally with `git commit`: ```bash git add modified_file.py git commit ``` Please remember to write [good commit messages](https://chris.beams.io/posts/git-commit/) to clearly communicate the changes you made! To keep your copy of the code up to date with the original repository, rebase your branch on `upstream/branch` *before* you open a pull request or if requested by a maintainer: ```bash git fetch upstream git rebase upstream/main ``` Push your changes to your branch: ```bash git push -u origin a-descriptive-name-for-my-changes ``` If you've already opened a pull request, you'll need to force push with the `--force` flag. Otherwise, if the pull request hasn't been opened yet, you can just push your changes normally. 6. Now you can go to your fork of the repository on GitHub and click on **Pull Request** to open a pull request. Make sure you tick off all the boxes on our [checklist](#pull-request-checklist) below. When you're ready, you can send your changes to the project maintainers for review. 7. It's ok if maintainers request changes, it happens to our core contributors too! So everyone can see the changes in the pull request, work in your local branch and push the changes to your fork. They will automatically appear in the pull request. ### Pull request checklist ☐ The pull request title should summarize your contribution.<br> ☐ If your pull request addresses an issue, please mention the issue number in the pull request description to make sure they are linked (and people viewing the issue know you are working on it).<br> ☐ To indicate a work in progress please prefix the title with `[WIP]`. These are useful to avoid duplicated work, and to differentiate it from PRs ready to be merged.<br> ☐ Make sure existing tests pass.<br> ☐ If adding a new feature, also add tests for it.<br> - If you are adding a new model, make sure you use `ModelTester.all_model_classes = (MyModel, MyModelWithLMHead,...)` to trigger the common tests. - If you are adding new `@slow` tests, make sure they pass using `RUN_SLOW=1 python -m pytest tests/models/my_new_model/test_my_new_model.py`. - If you are adding a new tokenizer, write tests and make sure `RUN_SLOW=1 python -m pytest tests/models/{your_model_name}/test_tokenization_{your_model_name}.py` passes. - CircleCI does not run the slow tests, but GitHub Actions does every night!<br> ☐ All public methods must have informative docstrings (see [`modeling_bert.py`](https://github.com/huggingface/transformers/blob/main/src/transformers/models/bert/modeling_bert.py) for an example).<br> ☐ Due to the rapidly growing repository, don't add any images, videos and other non-text files that'll significantly weigh down the repository. Instead, use a Hub repository such as [`hf-internal-testing`](https://huggingface.co/hf-internal-testing) to host these files and reference them by URL. We recommend placing documentation related images in the following repository: [huggingface/documentation-images](https://huggingface.co/datasets/huggingface/documentation-images). You can open a PR on this dataset repository and ask a Hugging Face member to merge it. For more information about the checks run on a pull request, take a look at our [Checks on a Pull Request](https://huggingface.co/docs/transformers/pr_checks) guide. ### Tests An extensive test suite is included to test the library behavior and several examples. 
Library tests can be found in the [tests](https://github.com/huggingface/transformers/tree/main/tests) folder and examples tests in the [examples](https://github.com/huggingface/transformers/tree/main/examples) folder.

We like `pytest` and `pytest-xdist` because it's faster. From the root of the repository, specify a *path to a subfolder or a test file* to run the test:

```bash
python -m pytest -n auto --dist=loadfile -s -v ./tests/models/my_new_model
```

Similarly, for the `examples` directory, specify a *path to a subfolder or test file* to run the test. For example, the following command tests the text classification subfolder in the PyTorch `examples` directory:

```bash
pip install -r examples/xxx/requirements.txt  # only needed the first time
python -m pytest -n auto --dist=loadfile -s -v ./examples/pytorch/text-classification
```

In fact, this is actually how our `make test` and `make test-examples` commands are implemented (not including the `pip install`)!

You can also specify a smaller set of tests in order to test only the feature you're working on.

By default, slow tests are skipped but you can set the `RUN_SLOW` environment variable to `yes` to run them. This will download many gigabytes of models so make sure you have enough disk space, a good internet connection or a lot of patience!

<Tip warning={true}>

Remember to specify a *path to a subfolder or a test file* to run the test. Otherwise, you'll run all the tests in the `tests` or `examples` folder, which will take a very long time!

</Tip>

```bash
RUN_SLOW=yes python -m pytest -n auto --dist=loadfile -s -v ./tests/models/my_new_model
RUN_SLOW=yes python -m pytest -n auto --dist=loadfile -s -v ./examples/pytorch/text-classification
```

Like the slow tests, there are other environment variables available which are not enabled by default during testing:
- `RUN_CUSTOM_TOKENIZERS`: Enables tests for custom tokenizers.
- `RUN_PT_FLAX_CROSS_TESTS`: Enables tests for PyTorch + Flax integration.
- `RUN_PT_TF_CROSS_TESTS`: Enables tests for TensorFlow + PyTorch integration.

More environment variables and additional information can be found in the [testing_utils.py](src/transformers/testing_utils.py).

🤗 Transformers uses `pytest` as a test runner only. It doesn't use any `pytest`-specific features in the test suite itself.

This means `unittest` is fully supported. Here's how to run tests with `unittest`:

```bash
python -m unittest discover -s tests -t . -v
python -m unittest discover -s examples -t examples -v
```

### Style guide

For documentation strings, 🤗 Transformers follows the [Google Python Style Guide](https://google.github.io/styleguide/pyguide.html). Check our [documentation writing guide](https://github.com/huggingface/transformers/tree/main/docs#writing-documentation---specification) for more information.

### Develop on Windows

On Windows (unless you're working in [Windows Subsystem for Linux](https://learn.microsoft.com/en-us/windows/wsl/) or WSL), you need to configure git to transform Windows `CRLF` line endings to Linux `LF` line endings:

```bash
git config core.autocrlf input
```

One way to run the `make` command on Windows is with MSYS2:

1. [Download MSYS2](https://www.msys2.org/), and we assume it's installed in `C:\msys64`.
2. Open the command line `C:\msys64\msys2.exe` (it should be available from the **Start** menu).
3. Run in the shell: `pacman -Syu` and install `make` with `pacman -S make`.
4. Add `C:\msys64\usr\bin` to your PATH environment variable.
You can now use `make` from any terminal (PowerShell, cmd.exe, etc.)! 🎉 ### Sync a forked repository with upstream main (the Hugging Face repository) When updating the main branch of a forked repository, please follow these steps to avoid pinging the upstream repository which adds reference notes to each upstream PR, and sends unnecessary notifications to the developers involved in these PRs. 1. When possible, avoid syncing with the upstream using a branch and PR on the forked repository. Instead, merge directly into the forked main. 2. If a PR is absolutely necessary, use the following steps after checking out your branch: ```bash git checkout -b your-branch-for-syncing git pull --squash --no-commit upstream main git commit -m '<your message without GitHub references>' git push --set-upstream origin your-branch-for-syncing ```
transformers/docs/source/en/contributing.md/0
{ "file_path": "transformers/docs/source/en/contributing.md", "repo_id": "transformers", "token_count": 5137 }
247
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Utilities for Generation

This page lists all the utility functions used by [`~generation.GenerationMixin.generate`].

## Generate Outputs

The output of [`~generation.GenerationMixin.generate`] is an instance of a subclass of [`~utils.ModelOutput`]. This output is a data structure containing all the information returned by [`~generation.GenerationMixin.generate`], but that can also be used as a tuple or a dictionary.

Here's an example:

```python
from transformers import GPT2Tokenizer, GPT2LMHeadModel

tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2")
model = GPT2LMHeadModel.from_pretrained("openai-community/gpt2")

inputs = tokenizer("Hello, my dog is cute and ", return_tensors="pt")
generation_output = model.generate(**inputs, return_dict_in_generate=True, output_scores=True)
```

The `generation_output` object is a [`~generation.GenerateDecoderOnlyOutput`]. As we can see in the documentation of that class below, it has the following attributes:

- `sequences`: the generated sequences of tokens
- `scores` (optional): the prediction scores of the language modelling head, for each generation step
- `hidden_states` (optional): the hidden states of the model, for each generation step
- `attentions` (optional): the attention weights of the model, for each generation step

Here we have the `scores` since we passed along `output_scores=True`, but we don't have `hidden_states` and `attentions` because we didn't pass `output_hidden_states=True` or `output_attentions=True`.

You can access each attribute as you would usually do, and if that attribute has not been returned by the model, you will get `None`. Here for instance `generation_output.scores` are all the generated prediction scores of the language modeling head, and `generation_output.attentions` is `None`.

When using our `generation_output` object as a tuple, it only keeps the attributes that don't have `None` values. Here, for instance, it has two elements, `sequences` then `scores`, so

```python
generation_output[:2]
```

will return the tuple `(generation_output.sequences, generation_output.scores)` for instance.

When using our `generation_output` object as a dictionary, it only keeps the attributes that don't have `None` values. Here, for instance, it has two keys that are `sequences` and `scores`.

We document here all output types.
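As a quick illustrative check (a small addition, not part of the example above), you can inspect which fields were actually populated and their shapes:

```python
print(generation_output.keys())  # e.g. odict_keys(['sequences', 'scores'])
print(generation_output.sequences.shape)  # (batch_size, generated_sequence_length)
```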
### PyTorch [[autodoc]] generation.GenerateDecoderOnlyOutput [[autodoc]] generation.GenerateEncoderDecoderOutput [[autodoc]] generation.GenerateBeamDecoderOnlyOutput [[autodoc]] generation.GenerateBeamEncoderDecoderOutput ### TensorFlow [[autodoc]] generation.TFGreedySearchEncoderDecoderOutput [[autodoc]] generation.TFGreedySearchDecoderOnlyOutput [[autodoc]] generation.TFSampleEncoderDecoderOutput [[autodoc]] generation.TFSampleDecoderOnlyOutput [[autodoc]] generation.TFBeamSearchEncoderDecoderOutput [[autodoc]] generation.TFBeamSearchDecoderOnlyOutput [[autodoc]] generation.TFBeamSampleEncoderDecoderOutput [[autodoc]] generation.TFBeamSampleDecoderOnlyOutput [[autodoc]] generation.TFContrastiveSearchEncoderDecoderOutput [[autodoc]] generation.TFContrastiveSearchDecoderOnlyOutput ### FLAX [[autodoc]] generation.FlaxSampleOutput [[autodoc]] generation.FlaxGreedySearchOutput [[autodoc]] generation.FlaxBeamSearchOutput ## LogitsProcessor A [`LogitsProcessor`] can be used to modify the prediction scores of a language model head for generation. ### PyTorch [[autodoc]] AlternatingCodebooksLogitsProcessor - __call__ [[autodoc]] ClassifierFreeGuidanceLogitsProcessor - __call__ [[autodoc]] EncoderNoRepeatNGramLogitsProcessor - __call__ [[autodoc]] EncoderRepetitionPenaltyLogitsProcessor - __call__ [[autodoc]] EpsilonLogitsWarper - __call__ [[autodoc]] EtaLogitsWarper - __call__ [[autodoc]] ExponentialDecayLengthPenalty - __call__ [[autodoc]] ForcedBOSTokenLogitsProcessor - __call__ [[autodoc]] ForcedEOSTokenLogitsProcessor - __call__ [[autodoc]] ForceTokensLogitsProcessor - __call__ [[autodoc]] HammingDiversityLogitsProcessor - __call__ [[autodoc]] InfNanRemoveLogitsProcessor - __call__ [[autodoc]] LogitNormalization - __call__ [[autodoc]] LogitsProcessor - __call__ [[autodoc]] LogitsProcessorList - __call__ [[autodoc]] LogitsWarper - __call__ [[autodoc]] MinLengthLogitsProcessor - __call__ [[autodoc]] MinNewTokensLengthLogitsProcessor - __call__ [[autodoc]] NoBadWordsLogitsProcessor - __call__ [[autodoc]] NoRepeatNGramLogitsProcessor - __call__ [[autodoc]] PrefixConstrainedLogitsProcessor - __call__ [[autodoc]] RepetitionPenaltyLogitsProcessor - __call__ [[autodoc]] SequenceBiasLogitsProcessor - __call__ [[autodoc]] SuppressTokensAtBeginLogitsProcessor - __call__ [[autodoc]] SuppressTokensLogitsProcessor - __call__ [[autodoc]] TemperatureLogitsWarper - __call__ [[autodoc]] TopKLogitsWarper - __call__ [[autodoc]] TopPLogitsWarper - __call__ [[autodoc]] TypicalLogitsWarper - __call__ [[autodoc]] UnbatchedClassifierFreeGuidanceLogitsProcessor - __call__ [[autodoc]] WhisperTimeStampLogitsProcessor - __call__ ### TensorFlow [[autodoc]] TFForcedBOSTokenLogitsProcessor - __call__ [[autodoc]] TFForcedEOSTokenLogitsProcessor - __call__ [[autodoc]] TFForceTokensLogitsProcessor - __call__ [[autodoc]] TFLogitsProcessor - __call__ [[autodoc]] TFLogitsProcessorList - __call__ [[autodoc]] TFLogitsWarper - __call__ [[autodoc]] TFMinLengthLogitsProcessor - __call__ [[autodoc]] TFNoBadWordsLogitsProcessor - __call__ [[autodoc]] TFNoRepeatNGramLogitsProcessor - __call__ [[autodoc]] TFRepetitionPenaltyLogitsProcessor - __call__ [[autodoc]] TFSuppressTokensAtBeginLogitsProcessor - __call__ [[autodoc]] TFSuppressTokensLogitsProcessor - __call__ [[autodoc]] TFTemperatureLogitsWarper - __call__ [[autodoc]] TFTopKLogitsWarper - __call__ [[autodoc]] TFTopPLogitsWarper - __call__ ### FLAX [[autodoc]] FlaxForcedBOSTokenLogitsProcessor - __call__ [[autodoc]] FlaxForcedEOSTokenLogitsProcessor - __call__ [[autodoc]] 
FlaxForceTokensLogitsProcessor - __call__ [[autodoc]] FlaxLogitsProcessor - __call__ [[autodoc]] FlaxLogitsProcessorList - __call__ [[autodoc]] FlaxLogitsWarper - __call__ [[autodoc]] FlaxMinLengthLogitsProcessor - __call__ [[autodoc]] FlaxSuppressTokensAtBeginLogitsProcessor - __call__ [[autodoc]] FlaxSuppressTokensLogitsProcessor - __call__ [[autodoc]] FlaxTemperatureLogitsWarper - __call__ [[autodoc]] FlaxTopKLogitsWarper - __call__ [[autodoc]] FlaxTopPLogitsWarper - __call__ [[autodoc]] FlaxWhisperTimeStampLogitsProcessor - __call__ ## StoppingCriteria A [`StoppingCriteria`] can be used to change when to stop generation (other than EOS token). Please note that this is exclusively available to our PyTorch implementations. [[autodoc]] StoppingCriteria - __call__ [[autodoc]] StoppingCriteriaList - __call__ [[autodoc]] MaxLengthCriteria - __call__ [[autodoc]] MaxTimeCriteria - __call__ ## Constraints A [`Constraint`] can be used to force the generation to include specific tokens or sequences in the output. Please note that this is exclusively available to our PyTorch implementations. [[autodoc]] Constraint [[autodoc]] PhrasalConstraint [[autodoc]] DisjunctiveConstraint [[autodoc]] ConstraintListState ## BeamSearch [[autodoc]] BeamScorer - process - finalize [[autodoc]] BeamSearchScorer - process - finalize [[autodoc]] ConstrainedBeamSearchScorer - process - finalize ## Streamers [[autodoc]] TextStreamer [[autodoc]] TextIteratorStreamer ## Caches [[autodoc]] Cache - update [[autodoc]] DynamicCache - update - get_seq_length - reorder_cache - to_legacy_cache - from_legacy_cache [[autodoc]] SinkCache - update - get_seq_length - reorder_cache [[autodoc]] StaticCache - update - get_seq_length
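As a closing illustration (a sketch added here, not taken from the upstream page), the logits processors documented above can be passed directly to `generate`; for example, forcing a minimum generation length with `MinLengthLogitsProcessor`:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, LogitsProcessorList, MinLengthLogitsProcessor

tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")

inputs = tokenizer("Hello, my dog is cute and ", return_tensors="pt")

# force at least 20 tokens before the EOS token can be generated
logits_processor = LogitsProcessorList(
    [MinLengthLogitsProcessor(20, eos_token_id=model.config.eos_token_id)]
)
outputs = model.generate(**inputs, logits_processor=logits_processor, max_new_tokens=30)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
```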
transformers/docs/source/en/internal/generation_utils.md/0
{ "file_path": "transformers/docs/source/en/internal/generation_utils.md", "repo_id": "transformers", "token_count": 2989 }
248
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Image Processor An image processor is in charge of preparing input features for vision models and post processing their outputs. This includes transformations such as resizing, normalization, and conversion to PyTorch, TensorFlow, Flax and Numpy tensors. It may also include model specific post-processing such as converting logits to segmentation masks. ## ImageProcessingMixin [[autodoc]] image_processing_utils.ImageProcessingMixin - from_pretrained - save_pretrained ## BatchFeature [[autodoc]] BatchFeature ## BaseImageProcessor [[autodoc]] image_processing_utils.BaseImageProcessor
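As a brief illustration (a sketch added here; the checkpoint is chosen only as an example), an image processor is typically loaded through `AutoImageProcessor` and applied to a PIL image before calling the model:

```python
import requests
from PIL import Image

from transformers import AutoImageProcessor

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# example checkpoint - any vision model repo with a preprocessor config works
image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
inputs = image_processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # e.g. torch.Size([1, 3, 224, 224])
```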
transformers/docs/source/en/main_classes/image_processor.md/0
{ "file_path": "transformers/docs/source/en/main_classes/image_processor.md", "repo_id": "transformers", "token_count": 343 }
249
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Audio Spectrogram Transformer ## Overview The Audio Spectrogram Transformer model was proposed in [AST: Audio Spectrogram Transformer](https://arxiv.org/abs/2104.01778) by Yuan Gong, Yu-An Chung, James Glass. The Audio Spectrogram Transformer applies a [Vision Transformer](vit) to audio, by turning audio into an image (spectrogram). The model obtains state-of-the-art results for audio classification. The abstract from the paper is the following: *In the past decade, convolutional neural networks (CNNs) have been widely adopted as the main building block for end-to-end audio classification models, which aim to learn a direct mapping from audio spectrograms to corresponding labels. To better capture long-range global context, a recent trend is to add a self-attention mechanism on top of the CNN, forming a CNN-attention hybrid model. However, it is unclear whether the reliance on a CNN is necessary, and if neural networks purely based on attention are sufficient to obtain good performance in audio classification. In this paper, we answer the question by introducing the Audio Spectrogram Transformer (AST), the first convolution-free, purely attention-based model for audio classification. We evaluate AST on various audio classification benchmarks, where it achieves new state-of-the-art results of 0.485 mAP on AudioSet, 95.6% accuracy on ESC-50, and 98.1% accuracy on Speech Commands V2.* <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/audio_spectogram_transformer_architecture.png" alt="drawing" width="600"/> <small> Audio Spectrogram Transformer architecture. Taken from the <a href="https://arxiv.org/abs/2104.01778">original paper</a>.</small> This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/YuanGongND/ast). ## Usage tips - When fine-tuning the Audio Spectrogram Transformer (AST) on your own dataset, it's recommended to take care of the input normalization (to make sure the input has mean of 0 and std of 0.5). [`ASTFeatureExtractor`] takes care of this. Note that it uses the AudioSet mean and std by default. You can check [`ast/src/get_norm_stats.py`](https://github.com/YuanGongND/ast/blob/master/src/get_norm_stats.py) to see how the authors compute the stats for a downstream dataset. - Note that the AST needs a low learning rate (the authors use a 10 times smaller learning rate compared to their CNN model proposed in the [PSLA paper](https://arxiv.org/abs/2102.01243)) and converges quickly, so please search for a suitable learning rate and learning rate scheduler for your task. 
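A minimal inference sketch (added for illustration; the checkpoint name and the dummy waveform are placeholders - in practice you would load real 16 kHz audio) could look like this:

```python
import numpy as np
import torch

from transformers import ASTFeatureExtractor, ASTForAudioClassification

checkpoint = "MIT/ast-finetuned-audioset-10-10-0.4593"  # example AudioSet-finetuned checkpoint
feature_extractor = ASTFeatureExtractor.from_pretrained(checkpoint)
model = ASTForAudioClassification.from_pretrained(checkpoint)

# dummy 1-second waveform sampled at 16 kHz - replace with your own audio
waveform = np.zeros(16000, dtype=np.float32)
inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits
predicted_label = model.config.id2label[logits.argmax(-1).item()]
print(predicted_label)
```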
## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with the Audio Spectrogram Transformer. <PipelineTag pipeline="audio-classification"/> - A notebook illustrating inference with AST for audio classification can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/AST). - [`ASTForAudioClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/audio-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/audio_classification.ipynb). - See also: [Audio classification](../tasks/audio_classification). If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. ## ASTConfig [[autodoc]] ASTConfig ## ASTFeatureExtractor [[autodoc]] ASTFeatureExtractor - __call__ ## ASTModel [[autodoc]] ASTModel - forward ## ASTForAudioClassification [[autodoc]] ASTForAudioClassification - forward
transformers/docs/source/en/model_doc/audio-spectrogram-transformer.md/0
{ "file_path": "transformers/docs/source/en/model_doc/audio-spectrogram-transformer.md", "repo_id": "transformers", "token_count": 1220 }
250
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Blenderbot Small Note that [`BlenderbotSmallModel`] and [`BlenderbotSmallForConditionalGeneration`] are only used in combination with the checkpoint [facebook/blenderbot-90M](https://huggingface.co/facebook/blenderbot-90M). Larger Blenderbot checkpoints should instead be used with [`BlenderbotModel`] and [`BlenderbotForConditionalGeneration`] ## Overview The Blender chatbot model was proposed in [Recipes for building an open-domain chatbot](https://arxiv.org/pdf/2004.13637.pdf) Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston on 30 Apr 2020. The abstract of the paper is the following: *Building open-domain chatbots is a challenging area for machine learning research. While prior work has shown that scaling neural models in the number of parameters and the size of the data they are trained on gives improved results, we show that other ingredients are important for a high-performing chatbot. Good conversation requires a number of skills that an expert conversationalist blends in a seamless way: providing engaging talking points and listening to their partners, and displaying knowledge, empathy and personality appropriately, while maintaining a consistent persona. We show that large scale models can learn these skills when given appropriate training data and choice of generation strategy. We build variants of these recipes with 90M, 2.7B and 9.4B parameter models, and make our models and code publicly available. Human evaluations show our best models are superior to existing approaches in multi-turn dialogue in terms of engagingness and humanness measurements. We then discuss the limitations of this work by analyzing failure cases of our models.* This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten). The authors' code can be found [here](https://github.com/facebookresearch/ParlAI). ## Usage tips Blenderbot Small is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. 
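For illustration, a minimal chat sketch (using the `facebook/blenderbot-90M` checkpoint mentioned above; the exact generation settings are up to you) might look like:

```python
from transformers import BlenderbotSmallForConditionalGeneration, BlenderbotSmallTokenizer

checkpoint = "facebook/blenderbot-90M"
tokenizer = BlenderbotSmallTokenizer.from_pretrained(checkpoint)
model = BlenderbotSmallForConditionalGeneration.from_pretrained(checkpoint)

utterance = "My friends are cool but they eat too many carbs."
inputs = tokenizer(utterance, return_tensors="pt")

reply_ids = model.generate(**inputs, max_new_tokens=40)
print(tokenizer.batch_decode(reply_ids, skip_special_tokens=True))
```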
## Resources - [Causal language modeling task guide](../tasks/language_modeling) - [Translation task guide](../tasks/translation) - [Summarization task guide](../tasks/summarization) ## BlenderbotSmallConfig [[autodoc]] BlenderbotSmallConfig ## BlenderbotSmallTokenizer [[autodoc]] BlenderbotSmallTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## BlenderbotSmallTokenizerFast [[autodoc]] BlenderbotSmallTokenizerFast <frameworkcontent> <pt> ## BlenderbotSmallModel [[autodoc]] BlenderbotSmallModel - forward ## BlenderbotSmallForConditionalGeneration [[autodoc]] BlenderbotSmallForConditionalGeneration - forward ## BlenderbotSmallForCausalLM [[autodoc]] BlenderbotSmallForCausalLM - forward </pt> <tf> ## TFBlenderbotSmallModel [[autodoc]] TFBlenderbotSmallModel - call ## TFBlenderbotSmallForConditionalGeneration [[autodoc]] TFBlenderbotSmallForConditionalGeneration - call </tf> <jax> ## FlaxBlenderbotSmallModel [[autodoc]] FlaxBlenderbotSmallModel - __call__ - encode - decode ## FlaxBlenderbotForConditionalGeneration [[autodoc]] FlaxBlenderbotSmallForConditionalGeneration - __call__ - encode - decode </jax> </frameworkcontent>
transformers/docs/source/en/model_doc/blenderbot-small.md/0
{ "file_path": "transformers/docs/source/en/model_doc/blenderbot-small.md", "repo_id": "transformers", "token_count": 1170 }
251
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # DeiT ## Overview The DeiT model was proposed in [Training data-efficient image transformers & distillation through attention](https://arxiv.org/abs/2012.12877) by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou. The [Vision Transformer (ViT)](vit) introduced in [Dosovitskiy et al., 2020](https://arxiv.org/abs/2010.11929) has shown that one can match or even outperform existing convolutional neural networks using a Transformer encoder (BERT-like). However, the ViT models introduced in that paper required training on expensive infrastructure for multiple weeks, using external data. DeiT (data-efficient image transformers) are more efficiently trained transformers for image classification, requiring far less data and far less computing resources compared to the original ViT models. The abstract from the paper is the following: *Recently, neural networks purely based on attention were shown to address image understanding tasks such as image classification. However, these visual transformers are pre-trained with hundreds of millions of images using an expensive infrastructure, thereby limiting their adoption. In this work, we produce a competitive convolution-free transformer by training on Imagenet only. We train them on a single computer in less than 3 days. Our reference vision transformer (86M parameters) achieves top-1 accuracy of 83.1% (single-crop evaluation) on ImageNet with no external data. More importantly, we introduce a teacher-student strategy specific to transformers. It relies on a distillation token ensuring that the student learns from the teacher through attention. We show the interest of this token-based distillation, especially when using a convnet as a teacher. This leads us to report results competitive with convnets for both Imagenet (where we obtain up to 85.2% accuracy) and when transferring to other tasks. We share our code and models.* This model was contributed by [nielsr](https://huggingface.co/nielsr). The TensorFlow version of this model was added by [amyeroberts](https://huggingface.co/amyeroberts). ## Usage tips - Compared to ViT, DeiT models use a so-called distillation token to effectively learn from a teacher (which, in the DeiT paper, is a ResNet like-model). The distillation token is learned through backpropagation, by interacting with the class ([CLS]) and patch tokens through the self-attention layers. - There are 2 ways to fine-tune distilled models, either (1) in a classic way, by only placing a prediction head on top of the final hidden state of the class token and not using the distillation signal, or (2) by placing both a prediction head on top of the class token and on top of the distillation token. 
In that case, the [CLS] prediction head is trained using regular cross-entropy between the prediction of the head and the ground-truth label, while the distillation prediction head is trained using hard distillation (cross-entropy between the prediction of the distillation head and the label predicted by the teacher). At inference time, one takes the average prediction between both heads as final prediction. (2) is also called "fine-tuning with distillation", because one relies on a teacher that has already been fine-tuned on the downstream dataset. In terms of models, (1) corresponds to [`DeiTForImageClassification`] and (2) corresponds to [`DeiTForImageClassificationWithTeacher`]. - Note that the authors also did try soft distillation for (2) (in which case the distillation prediction head is trained using KL divergence to match the softmax output of the teacher), but hard distillation gave the best results. - All released checkpoints were pre-trained and fine-tuned on ImageNet-1k only. No external data was used. This is in contrast with the original ViT model, which used external data like the JFT-300M dataset/Imagenet-21k for pre-training. - The authors of DeiT also released more efficiently trained ViT models, which you can directly plug into [`ViTModel`] or [`ViTForImageClassification`]. Techniques like data augmentation, optimization, and regularization were used in order to simulate training on a much larger dataset (while only using ImageNet-1k for pre-training). There are 4 variants available (in 3 different sizes): *facebook/deit-tiny-patch16-224*, *facebook/deit-small-patch16-224*, *facebook/deit-base-patch16-224* and *facebook/deit-base-patch16-384*. Note that one should use [`DeiTImageProcessor`] in order to prepare images for the model. ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with DeiT. <PipelineTag pipeline="image-classification"/> - [`DeiTForImageClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb). - See also: [Image classification task guide](../tasks/image_classification) Besides that: - [`DeiTForMaskedImageModeling`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining). If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. 
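To make the inference flow concrete, a minimal classification sketch with a distilled checkpoint could look like the following; the `facebook/deit-base-distilled-patch16-224` checkpoint and the COCO test image URL are assumptions chosen for illustration.

```python
import torch
import requests
from PIL import Image
from transformers import DeiTImageProcessor, DeiTForImageClassificationWithTeacher

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

# Preprocess the image; the "with teacher" head averages the class and distillation predictions
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

predicted_class_idx = logits.argmax(-1).item()
print(model.config.id2label[predicted_class_idx])
```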
## DeiTConfig [[autodoc]] DeiTConfig ## DeiTFeatureExtractor [[autodoc]] DeiTFeatureExtractor - __call__ ## DeiTImageProcessor [[autodoc]] DeiTImageProcessor - preprocess <frameworkcontent> <pt> ## DeiTModel [[autodoc]] DeiTModel - forward ## DeiTForMaskedImageModeling [[autodoc]] DeiTForMaskedImageModeling - forward ## DeiTForImageClassification [[autodoc]] DeiTForImageClassification - forward ## DeiTForImageClassificationWithTeacher [[autodoc]] DeiTForImageClassificationWithTeacher - forward </pt> <tf> ## TFDeiTModel [[autodoc]] TFDeiTModel - call ## TFDeiTForMaskedImageModeling [[autodoc]] TFDeiTForMaskedImageModeling - call ## TFDeiTForImageClassification [[autodoc]] TFDeiTForImageClassification - call ## TFDeiTForImageClassificationWithTeacher [[autodoc]] TFDeiTForImageClassificationWithTeacher - call </tf> </frameworkcontent>
transformers/docs/source/en/model_doc/deit.md/0
{ "file_path": "transformers/docs/source/en/model_doc/deit.md", "repo_id": "transformers", "token_count": 1955 }
252
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Gemma ## Overview The Gemma model was proposed in [Gemma: Open Models Based on Gemini Technology and Research](https://blog.google/technology/developers/gemma-open-models/) by Gemma Team, Google. Gemma models are trained on 6T tokens, and released with 2 versions, 2b and 7b. The abstract from the paper is the following: *This work introduces Gemma, a new family of open language models demonstrating strong performance across academic benchmarks for language understanding, reasoning, and safety. We release two sizes of models (2 billion and 7 billion parameters), and provide both pretrained and fine-tuned checkpoints. Gemma outperforms similarly sized open models on 11 out of 18 text-based tasks, and we present comprehensive evaluations of safety and responsibility aspects of the models, alongside a detailed description of our model development. We believe the responsible release of LLMs is critical for improving the safety of frontier models, and for enabling the next wave of LLM innovations* Tips: - The original checkpoints can be converted using the conversion script `src/transformers/models/gemma/convert_gemma_weights_to_hf.py` This model was contributed by [Arthur Zucker](https://huggingface.co/ArthurZ), [Younes Belkada](https://huggingface.co/ybelkada), [Sanchit Gandhi](https://huggingface.co/sanchit-gandhi), [Pedro Cuenca](https://huggingface.co/pcuenq). ## GemmaConfig [[autodoc]] GemmaConfig ## GemmaTokenizer [[autodoc]] GemmaTokenizer ## GemmaTokenizerFast [[autodoc]] GemmaTokenizerFast ## GemmaModel [[autodoc]] GemmaModel - forward ## GemmaForCausalLM [[autodoc]] GemmaForCausalLM - forward ## GemmaForSequenceClassification [[autodoc]] GemmaForSequenceClassification - forward ## FlaxGemmaModel [[autodoc]] FlaxGemmaModel - __call__ ## FlaxGemmaForCausalLM [[autodoc]] FlaxGemmaForCausalLM - __call__
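For orientation, a minimal text-generation sketch might look like this. The `google/gemma-2b` checkpoint is an assumption for illustration; it is gated, so you may need to accept the license and authenticate before downloading.

```python
import torch
from transformers import AutoTokenizer, GemmaForCausalLM

model_id = "google/gemma-2b"  # assumed checkpoint; any Gemma causal LM checkpoint works the same way
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = GemmaForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16)

inputs = tokenizer("The capital of France is", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```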
transformers/docs/source/en/model_doc/gemma.md/0
{ "file_path": "transformers/docs/source/en/model_doc/gemma.md", "repo_id": "transformers", "token_count": 736 }
253
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # IDEFICS ## Overview The IDEFICS model was proposed in [OBELICS: An Open Web-Scale Filtered Dataset of Interleaved Image-Text Documents ](https://huggingface.co/papers/2306.16527 ) by Hugo Laurençon, Lucile Saulnier, Léo Tronchon, Stas Bekman, Amanpreet Singh, Anton Lozhkov, Thomas Wang, Siddharth Karamcheti, Alexander M. Rush, Douwe Kiela, Matthieu Cord, Victor Sanh The abstract from the paper is the following: *Large multimodal models trained on natural documents, which interleave images and text, outperform models trained on image-text pairs on various multimodal benchmarks that require reasoning over one or multiple images to generate a text. However, the datasets used to train these models have not been released, and the collection process has not been fully specified. We introduce the OBELICS dataset, an open web-scale filtered dataset of interleaved image-text documents comprising 141 million web pages extracted from Common Crawl, 353 million associated images, and 115 billion text tokens. We describe the dataset creation process, present comprehensive filtering rules, and provide an analysis of the dataset's content. To show the viability of OBELISC, we train an 80 billion parameters vision and language model on the dataset and obtain competitive performance on various multimodal benchmarks. We release the code to reproduce the dataset along with the dataset itself.* This model was contributed by [HuggingFaceM4](https://huggingface.co/HuggingFaceM4). The original code can be found [here](<INSERT LINK TO GITHUB REPO HERE>). (TODO: don't have a public link yet). <Tip warning={true}> IDEFICS modeling code in Transformers is for finetuning and inferencing the pre-trained IDEFICS models. To train a new IDEFICS model from scratch use the m4 codebase (a link will be provided once it's made public) </Tip> ## IdeficsConfig [[autodoc]] IdeficsConfig ## IdeficsModel [[autodoc]] IdeficsModel - forward ## IdeficsForVisionText2Text [[autodoc]] IdeficsForVisionText2Text - forward ## IdeficsImageProcessor [[autodoc]] IdeficsImageProcessor - preprocess ## IdeficsProcessor [[autodoc]] IdeficsProcessor - __call__
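For orientation, a minimal generation sketch could look like the one below. The `HuggingFaceM4/idefics-9b` checkpoint, the image URL and the prompt are assumptions chosen for illustration, and the exact processor call signature may differ across library versions.

```python
import torch
from transformers import AutoProcessor, IdeficsForVisionText2Text

checkpoint = "HuggingFaceM4/idefics-9b"  # assumed checkpoint
processor = AutoProcessor.from_pretrained(checkpoint)
model = IdeficsForVisionText2Text.from_pretrained(checkpoint, torch_dtype=torch.bfloat16, device_map="auto")

# A prompt interleaves text with images (here an image given as a URL)
prompts = [
    [
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        "Question: What animals are in this picture? Answer:",
    ],
]
inputs = processor(prompts, return_tensors="pt").to(model.device)

generated_ids = model.generate(**inputs, max_new_tokens=20)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])
```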
transformers/docs/source/en/model_doc/idefics.md/0
{ "file_path": "transformers/docs/source/en/model_doc/idefics.md", "repo_id": "transformers", "token_count": 776 }
254
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # LLaVA-NeXT ## Overview The LLaVA-NeXT model was proposed in [LLaVA-NeXT: Improved reasoning, OCR, and world knowledge](https://llava-vl.github.io/blog/2024-01-30-llava-next/) by Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, Yong Jae Lee. LLaVa-NeXT (also called LLaVa-1.6) improves upon [LLaVa](llava) by increasing the input image resolution and training on an improved visual instruction tuning dataset to improve OCR and common sense reasoning. The introduction from the blog is the following: *In October 2023, we released LLaVA-1.5 with a simple and efficient design along with great performance on a benchmark suite of 12 datasets. It has since served as the foundation of many comprehensive studies of data, model, and capabilities of large multimodal models (LMM), and has enabled various new applications. Today, we are thrilled to present LLaVA-NeXT, with improved reasoning, OCR, and world knowledge. LLaVA-NeXT even exceeds Gemini Pro on several benchmarks. Compared with LLaVA-1.5, LLaVA-NeXT has several improvements: Increasing the input image resolution to 4x more pixels. This allows it to grasp more visual details. It supports three aspect ratios, up to 672x672, 336x1344, 1344x336 resolution. Better visual reasoning and OCR capability with an improved visual instruction tuning data mixture. Better visual conversation for more scenarios, covering different applications. Better world knowledge and logical reasoning. Efficient deployment and inference with SGLang. Along with performance improvements, LLaVA-NeXT maintains the minimalist design and data efficiency of LLaVA-1.5. It re-uses the pretrained connector of LLaVA-1.5, and still uses less than 1M visual instruction tuning samples. The largest 34B variant finishes training in ~1 day with 32 A100s.* <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/llava_next_overview.png" alt="drawing" width="600"/> <small> LLaVa-NeXT incorporates a higher input resolution by encoding various patches of the input image. Taken from the <a href="https://arxiv.org/abs/2310.03744">original paper.</a> </small> This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/haotian-liu/LLaVA/tree/main). ## Usage tips - We advise users to use `padding_side="left"` when computing batched generation as it leads to more accurate results. Simply make sure to call `processor.tokenizer.padding_side = "left"` before generating. - Note that each checkpoint has been trained with a specific prompt format, depending on which large language model (LLM) was used. 
Below, we list the correct prompt formats to use for the text prompt "What is shown in this image?":

[llava-v1.6-mistral-7b-hf](https://huggingface.co/llava-hf/llava-v1.6-mistral-7b-hf) requires the following format:

```bash
"[INST] <image>\nWhat is shown in this image? [/INST]"
```

[llava-v1.6-vicuna-7b-hf](https://huggingface.co/llava-hf/llava-v1.6-vicuna-7b-hf) and [llava-v1.6-vicuna-13b-hf](https://huggingface.co/llava-hf/llava-v1.6-vicuna-13b-hf) require the following format:

```bash
"A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions. USER: <image>\nWhat is shown in this image? ASSISTANT:"
```

[llava-v1.6-34b-hf](https://huggingface.co/llava-hf/llava-v1.6-34b-hf) requires the following format:

```bash
"<|im_start|>system\nAnswer the questions.<|im_end|><|im_start|>user\n<image>\nWhat is shown in this image?<|im_end|><|im_start|>assistant\n"
```

## Usage example

Here's how to load the model and perform inference in half-precision (`torch.float16`):

```python
from transformers import LlavaNextProcessor, LlavaNextForConditionalGeneration
import torch
from PIL import Image
import requests

processor = LlavaNextProcessor.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf")
model = LlavaNextForConditionalGeneration.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf", torch_dtype=torch.float16, low_cpu_mem_usage=True)
model.to("cuda:0")

# prepare image and text prompt, using the appropriate prompt template
url = "https://github.com/haotian-liu/LLaVA/blob/1a91fc274d7c35a9b50b3cb29c4247ae5837ce39/images/llava_v1_5_radar.jpg?raw=true"
image = Image.open(requests.get(url, stream=True).raw)
prompt = "[INST] <image>\nWhat is shown in this image? [/INST]"

inputs = processor(prompt, image, return_tensors="pt").to("cuda:0")

# autoregressively complete prompt
output = model.generate(**inputs, max_new_tokens=100)

print(processor.decode(output[0], skip_special_tokens=True))
```

## Model optimization

### Quantization using Bitsandbytes

The model can be loaded in 8 or 4 bits, greatly reducing the memory requirements while maintaining the performance of the original model. First make sure to install bitsandbytes, `pip install bitsandbytes`, and make sure to have access to a CUDA-compatible GPU device. Simply change the snippet above with:

```python
import torch
from transformers import LlavaNextForConditionalGeneration, BitsAndBytesConfig

# specify how to quantize the model
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)

model = LlavaNextForConditionalGeneration.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf", quantization_config=quantization_config, device_map="auto")
```

### Use Flash-Attention 2 to further speed-up generation

First make sure to install flash-attn. Refer to the [original repository of Flash Attention](https://github.com/Dao-AILab/flash-attention) regarding that package installation. Simply change the snippet above with:

```python
import torch
from transformers import LlavaNextForConditionalGeneration

model = LlavaNextForConditionalGeneration.from_pretrained(
    "llava-hf/llava-v1.6-mistral-7b-hf",
    torch_dtype=torch.float16,
    low_cpu_mem_usage=True,
    use_flash_attention_2=True
).to(0)
```

## LlavaNextConfig

[[autodoc]] LlavaNextConfig

## LlavaNextImageProcessor

[[autodoc]] LlavaNextImageProcessor
    - preprocess

## LlavaNextProcessor

[[autodoc]] LlavaNextProcessor

## LlavaNextForConditionalGeneration

[[autodoc]] LlavaNextForConditionalGeneration
    - forward
transformers/docs/source/en/model_doc/llava_next.md/0
{ "file_path": "transformers/docs/source/en/model_doc/llava_next.md", "repo_id": "transformers", "token_count": 2186 }
255
<!--Copyright 2021 NVIDIA Corporation and The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # MegatronBERT ## Overview The MegatronBERT model was proposed in [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro. The abstract from the paper is the following: *Recent work in language modeling demonstrates that training large transformer models advances the state of the art in Natural Language Processing applications. However, very large models can be quite difficult to train due to memory constraints. In this work, we present our techniques for training very large transformer models and implement a simple, efficient intra-layer model parallel approach that enables training transformer models with billions of parameters. Our approach does not require a new compiler or library changes, is orthogonal and complimentary to pipeline model parallelism, and can be fully implemented with the insertion of a few communication operations in native PyTorch. We illustrate this approach by converging transformer based models up to 8.3 billion parameters using 512 GPUs. We sustain 15.1 PetaFLOPs across the entire application with 76% scaling efficiency when compared to a strong single GPU baseline that sustains 39 TeraFLOPs, which is 30% of peak FLOPs. To demonstrate that large language models can further advance the state of the art (SOTA), we train an 8.3 billion parameter transformer language model similar to GPT-2 and a 3.9 billion parameter model similar to BERT. We show that careful attention to the placement of layer normalization in BERT-like models is critical to achieving increased performance as the model size grows. Using the GPT-2 model we achieve SOTA results on the WikiText103 (10.8 compared to SOTA perplexity of 15.8) and LAMBADA (66.5% compared to SOTA accuracy of 63.2%) datasets. Our BERT model achieves SOTA results on the RACE dataset (90.9% compared to SOTA accuracy of 89.4%).* This model was contributed by [jdemouth](https://huggingface.co/jdemouth). The original code can be found [here](https://github.com/NVIDIA/Megatron-LM). That repository contains a multi-GPU and multi-node implementation of the Megatron Language models. In particular, it contains a hybrid model parallel approach using "tensor parallel" and "pipeline parallel" techniques. ## Usage tips We have provided pretrained [BERT-345M](https://ngc.nvidia.com/catalog/models/nvidia:megatron_bert_345m) checkpoints for use to evaluate or finetuning downstream tasks. To access these checkpoints, first [sign up](https://ngc.nvidia.com/signup) for and setup the NVIDIA GPU Cloud (NGC) Registry CLI. 
Further documentation for downloading models can be found in the [NGC documentation](https://docs.nvidia.com/dgx/ngc-registry-cli-user-guide/index.html#topic_6_4_1). Alternatively, you can directly download the checkpoints using: BERT-345M-uncased: ```bash wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/megatron_bert_345m/versions/v0.1_uncased/zip -O megatron_bert_345m_v0_1_uncased.zip ``` BERT-345M-cased: ```bash wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/megatron_bert_345m/versions/v0.1_cased/zip -O megatron_bert_345m_v0_1_cased.zip ``` Once you have obtained the checkpoints from NVIDIA GPU Cloud (NGC), you have to convert them to a format that will easily be loaded by Hugging Face Transformers and our port of the BERT code. The following commands allow you to do the conversion. We assume that the folder `models/megatron_bert` contains `megatron_bert_345m_v0_1_{cased, uncased}.zip` and that the commands are run from inside that folder: ```bash python3 $PATH_TO_TRANSFORMERS/models/megatron_bert/convert_megatron_bert_checkpoint.py megatron_bert_345m_v0_1_uncased.zip ``` ```bash python3 $PATH_TO_TRANSFORMERS/models/megatron_bert/convert_megatron_bert_checkpoint.py megatron_bert_345m_v0_1_cased.zip ``` ## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) - [Question answering task guide](../tasks/question_answering) - [Causal language modeling task guide](../tasks/language_modeling) - [Masked language modeling task guide](../tasks/masked_language_modeling) - [Multiple choice task guide](../tasks/multiple_choice) ## MegatronBertConfig [[autodoc]] MegatronBertConfig ## MegatronBertModel [[autodoc]] MegatronBertModel - forward ## MegatronBertForMaskedLM [[autodoc]] MegatronBertForMaskedLM - forward ## MegatronBertForCausalLM [[autodoc]] MegatronBertForCausalLM - forward ## MegatronBertForNextSentencePrediction [[autodoc]] MegatronBertForNextSentencePrediction - forward ## MegatronBertForPreTraining [[autodoc]] MegatronBertForPreTraining - forward ## MegatronBertForSequenceClassification [[autodoc]] MegatronBertForSequenceClassification - forward ## MegatronBertForMultipleChoice [[autodoc]] MegatronBertForMultipleChoice - forward ## MegatronBertForTokenClassification [[autodoc]] MegatronBertForTokenClassification - forward ## MegatronBertForQuestionAnswering [[autodoc]] MegatronBertForQuestionAnswering - forward
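Once converted, the checkpoint behaves like a regular BERT-style model. A minimal masked-language-modeling sketch might look like this; the local output directory and the use of the standard `bert-large-uncased` tokenizer are assumptions for illustration, so adjust them to match what the conversion script actually produced.

```python
import torch
from transformers import BertTokenizer, MegatronBertForMaskedLM

# Directory produced by the conversion step above (assumed layout)
checkpoint_dir = "models/megatron_bert/megatron_bert_345m_v0_1_uncased"

# The 345M uncased checkpoint is assumed to reuse the standard BERT uncased vocabulary
tokenizer = BertTokenizer.from_pretrained("bert-large-uncased")
model = MegatronBertForMaskedLM.from_pretrained(checkpoint_dir)

inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Pick the highest-scoring token at the masked position
mask_idx = (inputs.input_ids[0] == tokenizer.mask_token_id).nonzero(as_tuple=True)[0]
predicted_ids = logits[0, mask_idx].argmax(-1)
print(tokenizer.decode(predicted_ids))
```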
transformers/docs/source/en/model_doc/megatron-bert.md/0
{ "file_path": "transformers/docs/source/en/model_doc/megatron-bert.md", "repo_id": "transformers", "token_count": 1735 }
256
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # MusicGen ## Overview The MusicGen model was proposed in the paper [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi and Alexandre Défossez. MusicGen is a single stage auto-regressive Transformer model capable of generating high-quality music samples conditioned on text descriptions or audio prompts. The text descriptions are passed through a frozen text encoder model to obtain a sequence of hidden-state representations. MusicGen is then trained to predict discrete audio tokens, or *audio codes*, conditioned on these hidden-states. These audio tokens are then decoded using an audio compression model, such as EnCodec, to recover the audio waveform. Through an efficient token interleaving pattern, MusicGen does not require a self-supervised semantic representation of the text/audio prompts, thus eliminating the need to cascade multiple models to predict a set of codebooks (e.g. hierarchically or upsampling). Instead, it is able to generate all the codebooks in a single forward pass. The abstract from the paper is the following: *We tackle the task of conditional music generation. We introduce MusicGen, a single Language Model (LM) that operates over several streams of compressed discrete music representation, i.e., tokens. Unlike prior work, MusicGen is comprised of a single-stage transformer LM together with efficient token interleaving patterns, which eliminates the need for cascading several models, e.g., hierarchically or upsampling. Following this approach, we demonstrate how MusicGen can generate high-quality samples, while being conditioned on textual description or melodic features, allowing better controls over the generated output. We conduct extensive empirical evaluation, considering both automatic and human studies, showing the proposed approach is superior to the evaluated baselines on a standard text-to-music benchmark. Through ablation studies, we shed light over the importance of each of the components comprising MusicGen.* This model was contributed by [sanchit-gandhi](https://huggingface.co/sanchit-gandhi). The original code can be found [here](https://github.com/facebookresearch/audiocraft). The pre-trained checkpoints can be found on the [Hugging Face Hub](https://huggingface.co/models?sort=downloads&search=facebook%2Fmusicgen-). 
## Usage tips - After downloading the original checkpoints from [here](https://github.com/facebookresearch/audiocraft/blob/main/docs/MUSICGEN.md#importing--exporting-models) , you can convert them using the **conversion script** available at `src/transformers/models/musicgen/convert_musicgen_transformers.py` with the following command: ```bash python src/transformers/models/musicgen/convert_musicgen_transformers.py \ --checkpoint small --pytorch_dump_folder /output/path --safe_serialization ``` ## Generation MusicGen is compatible with two generation modes: greedy and sampling. In practice, sampling leads to significantly better results than greedy, thus we encourage sampling mode to be used where possible. Sampling is enabled by default, and can be explicitly specified by setting `do_sample=True` in the call to [`MusicgenForConditionalGeneration.generate`], or by overriding the model's generation config (see below). Generation is limited by the sinusoidal positional embeddings to 30 second inputs. Meaning, MusicGen cannot generate more than 30 seconds of audio (1503 tokens), and input audio passed by Audio-Prompted Generation contributes to this limit so, given an input of 20 seconds of audio, MusicGen cannot generate more than 10 seconds of additional audio. Transformers supports both mono (1-channel) and stereo (2-channel) variants of MusicGen. The mono channel versions generate a single set of codebooks. The stereo versions generate 2 sets of codebooks, 1 for each channel (left/right), and each set of codebooks is decoded independently through the audio compression model. The audio streams for each channel are combined to give the final stereo output. ### Unconditional Generation The inputs for unconditional (or 'null') generation can be obtained through the method [`MusicgenForConditionalGeneration.get_unconditional_inputs`]: ```python >>> from transformers import MusicgenForConditionalGeneration >>> model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small") >>> unconditional_inputs = model.get_unconditional_inputs(num_samples=1) >>> audio_values = model.generate(**unconditional_inputs, do_sample=True, max_new_tokens=256) ``` The audio outputs are a three-dimensional Torch tensor of shape `(batch_size, num_channels, sequence_length)`. To listen to the generated audio samples, you can either play them in an ipynb notebook: ```python from IPython.display import Audio sampling_rate = model.config.audio_encoder.sampling_rate Audio(audio_values[0].numpy(), rate=sampling_rate) ``` Or save them as a `.wav` file using a third-party library, e.g. `scipy`: ```python >>> import scipy >>> sampling_rate = model.config.audio_encoder.sampling_rate >>> scipy.io.wavfile.write("musicgen_out.wav", rate=sampling_rate, data=audio_values[0, 0].numpy()) ``` ### Text-Conditional Generation The model can generate an audio sample conditioned on a text prompt through use of the [`MusicgenProcessor`] to pre-process the inputs: ```python >>> from transformers import AutoProcessor, MusicgenForConditionalGeneration >>> processor = AutoProcessor.from_pretrained("facebook/musicgen-small") >>> model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small") >>> inputs = processor( ... text=["80s pop track with bassy drums and synth", "90s rock song with loud guitars and heavy drums"], ... padding=True, ... return_tensors="pt", ... 
) >>> audio_values = model.generate(**inputs, do_sample=True, guidance_scale=3, max_new_tokens=256) ``` The `guidance_scale` is used in classifier free guidance (CFG), setting the weighting between the conditional logits (which are predicted from the text prompts) and the unconditional logits (which are predicted from an unconditional or 'null' prompt). Higher guidance scale encourages the model to generate samples that are more closely linked to the input prompt, usually at the expense of poorer audio quality. CFG is enabled by setting `guidance_scale > 1`. For best results, use `guidance_scale=3` (default). ### Audio-Prompted Generation The same [`MusicgenProcessor`] can be used to pre-process an audio prompt that is used for audio continuation. In the following example, we load an audio file using the 🤗 Datasets library, which can be pip installed through the command below: ```bash pip install --upgrade pip pip install datasets[audio] ``` ```python >>> from transformers import AutoProcessor, MusicgenForConditionalGeneration >>> from datasets import load_dataset >>> processor = AutoProcessor.from_pretrained("facebook/musicgen-small") >>> model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small") >>> dataset = load_dataset("sanchit-gandhi/gtzan", split="train", streaming=True) >>> sample = next(iter(dataset))["audio"] >>> # take the first half of the audio sample >>> sample["array"] = sample["array"][: len(sample["array"]) // 2] >>> inputs = processor( ... audio=sample["array"], ... sampling_rate=sample["sampling_rate"], ... text=["80s blues track with groovy saxophone"], ... padding=True, ... return_tensors="pt", ... ) >>> audio_values = model.generate(**inputs, do_sample=True, guidance_scale=3, max_new_tokens=256) ``` For batched audio-prompted generation, the generated `audio_values` can be post-processed to remove padding by using the [`MusicgenProcessor`] class: ```python >>> from transformers import AutoProcessor, MusicgenForConditionalGeneration >>> from datasets import load_dataset >>> processor = AutoProcessor.from_pretrained("facebook/musicgen-small") >>> model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small") >>> dataset = load_dataset("sanchit-gandhi/gtzan", split="train", streaming=True) >>> sample = next(iter(dataset))["audio"] >>> # take the first quarter of the audio sample >>> sample_1 = sample["array"][: len(sample["array"]) // 4] >>> # take the first half of the audio sample >>> sample_2 = sample["array"][: len(sample["array"]) // 2] >>> inputs = processor( ... audio=[sample_1, sample_2], ... sampling_rate=sample["sampling_rate"], ... text=["80s blues track with groovy saxophone", "90s rock song with loud guitars and heavy drums"], ... padding=True, ... return_tensors="pt", ... 
) >>> audio_values = model.generate(**inputs, do_sample=True, guidance_scale=3, max_new_tokens=256) >>> # post-process to remove padding from the batched audio >>> audio_values = processor.batch_decode(audio_values, padding_mask=inputs.padding_mask) ``` ### Generation Configuration The default parameters that control the generation process, such as sampling, guidance scale and number of generated tokens, can be found in the model's generation config, and updated as desired: ```python >>> from transformers import MusicgenForConditionalGeneration >>> model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small") >>> # inspect the default generation config >>> model.generation_config >>> # increase the guidance scale to 4.0 >>> model.generation_config.guidance_scale = 4.0 >>> # decrease the max length to 256 tokens >>> model.generation_config.max_length = 256 ``` Note that any arguments passed to the generate method will **supersede** those in the generation config, so setting `do_sample=False` in the call to generate will supersede the setting of `model.generation_config.do_sample` in the generation config. ## Model Structure The MusicGen model can be de-composed into three distinct stages: 1. Text encoder: maps the text inputs to a sequence of hidden-state representations. The pre-trained MusicGen models use a frozen text encoder from either T5 or Flan-T5 2. MusicGen decoder: a language model (LM) that auto-regressively generates audio tokens (or codes) conditional on the encoder hidden-state representations 3. Audio encoder/decoder: used to encode an audio prompt to use as prompt tokens, and recover the audio waveform from the audio tokens predicted by the decoder Thus, the MusicGen model can either be used as a standalone decoder model, corresponding to the class [`MusicgenForCausalLM`], or as a composite model that includes the text encoder and audio encoder/decoder, corresponding to the class [`MusicgenForConditionalGeneration`]. If only the decoder needs to be loaded from the pre-trained checkpoint, it can be loaded by first specifying the correct config, or be accessed through the `.decoder` attribute of the composite model: ```python >>> from transformers import AutoConfig, MusicgenForCausalLM, MusicgenForConditionalGeneration >>> # Option 1: get decoder config and pass to `.from_pretrained` >>> decoder_config = AutoConfig.from_pretrained("facebook/musicgen-small").decoder >>> decoder = MusicgenForCausalLM.from_pretrained("facebook/musicgen-small", **decoder_config) >>> # Option 2: load the entire composite model, but only return the decoder >>> decoder = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small").decoder ``` Since the text encoder and audio encoder/decoder models are frozen during training, the MusicGen decoder [`MusicgenForCausalLM`] can be trained standalone on a dataset of encoder hidden-states and audio codes. For inference, the trained decoder can be combined with the frozen text encoder and audio encoder/decoders to recover the composite [`MusicgenForConditionalGeneration`] model. Tips: * MusicGen is trained on the 32kHz checkpoint of Encodec. You should ensure you use a compatible version of the Encodec model. 
* Sampling mode tends to deliver better results than greedy - you can toggle sampling with the variable `do_sample` in the call to [`MusicgenForConditionalGeneration.generate`] ## MusicgenDecoderConfig [[autodoc]] MusicgenDecoderConfig ## MusicgenConfig [[autodoc]] MusicgenConfig ## MusicgenProcessor [[autodoc]] MusicgenProcessor ## MusicgenModel [[autodoc]] MusicgenModel - forward ## MusicgenForCausalLM [[autodoc]] MusicgenForCausalLM - forward ## MusicgenForConditionalGeneration [[autodoc]] MusicgenForConditionalGeneration - forward
transformers/docs/source/en/model_doc/musicgen.md/0
{ "file_path": "transformers/docs/source/en/model_doc/musicgen.md", "repo_id": "transformers", "token_count": 3592 }
257
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # RAG <div class="flex flex-wrap space-x-1"> <a href="https://huggingface.co/models?filter=rag"> <img alt="Models" src="https://img.shields.io/badge/All_model_pages-rag-blueviolet"> </a> </div> ## Overview Retrieval-augmented generation ("RAG") models combine the powers of pretrained dense retrieval (DPR) and sequence-to-sequence models. RAG models retrieve documents, pass them to a seq2seq model, then marginalize to generate outputs. The retriever and seq2seq modules are initialized from pretrained models, and fine-tuned jointly, allowing both retrieval and generation to adapt to downstream tasks. It is based on the paper [Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks](https://arxiv.org/abs/2005.11401) by Patrick Lewis, Ethan Perez, Aleksandara Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich Küttler, Mike Lewis, Wen-tau Yih, Tim Rocktäschel, Sebastian Riedel, Douwe Kiela. The abstract from the paper is the following: *Large pre-trained language models have been shown to store factual knowledge in their parameters, and achieve state-of-the-art results when fine-tuned on downstream NLP tasks. However, their ability to access and precisely manipulate knowledge is still limited, and hence on knowledge-intensive tasks, their performance lags behind task-specific architectures. Additionally, providing provenance for their decisions and updating their world knowledge remain open research problems. Pre-trained models with a differentiable access mechanism to explicit nonparametric memory can overcome this issue, but have so far been only investigated for extractive downstream tasks. We explore a general-purpose fine-tuning recipe for retrieval-augmented generation (RAG) — models which combine pre-trained parametric and non-parametric memory for language generation. We introduce RAG models where the parametric memory is a pre-trained seq2seq model and the non-parametric memory is a dense vector index of Wikipedia, accessed with a pre-trained neural retriever. We compare two RAG formulations, one which conditions on the same retrieved passages across the whole generated sequence, the other can use different passages per token. We fine-tune and evaluate our models on a wide range of knowledge-intensive NLP tasks and set the state-of-the-art on three open domain QA tasks, outperforming parametric seq2seq models and task-specific retrieve-and-extract architectures. For language generation tasks, we find that RAG models generate more specific, diverse and factual language than a state-of-the-art parametric-only seq2seq baseline.* This model was contributed by [ola13](https://huggingface.co/ola13). ## Usage tips Retrieval-augmented generation ("RAG") models combine the powers of pretrained dense retrieval (DPR) and Seq2Seq models. 
RAG models retrieve docs, pass them to a seq2seq model, then marginalize to generate outputs. The retriever and seq2seq modules are initialized from pretrained models, and fine-tuned jointly, allowing both retrieval and generation to adapt to downstream tasks. ## RagConfig [[autodoc]] RagConfig ## RagTokenizer [[autodoc]] RagTokenizer ## Rag specific outputs [[autodoc]] models.rag.modeling_rag.RetrievAugLMMarginOutput [[autodoc]] models.rag.modeling_rag.RetrievAugLMOutput ## RagRetriever [[autodoc]] RagRetriever <frameworkcontent> <pt> ## RagModel [[autodoc]] RagModel - forward ## RagSequenceForGeneration [[autodoc]] RagSequenceForGeneration - forward - generate ## RagTokenForGeneration [[autodoc]] RagTokenForGeneration - forward - generate </pt> <tf> ## TFRagModel [[autodoc]] TFRagModel - call ## TFRagSequenceForGeneration [[autodoc]] TFRagSequenceForGeneration - call - generate ## TFRagTokenForGeneration [[autodoc]] TFRagTokenForGeneration - call - generate </tf> </frameworkcontent>
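To make the retrieve-then-generate flow concrete, a minimal question-answering sketch might look like the following. It uses the `facebook/rag-token-nq` checkpoint with a dummy retrieval index so that it runs without downloading the full Wikipedia index, and it requires the `datasets` and `faiss-cpu` packages; the question is made up for illustration.

```python
from transformers import RagRetriever, RagTokenForGeneration, RagTokenizer

tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
# The dummy dataset keeps the example lightweight; swap it for the full index in practice
retriever = RagRetriever.from_pretrained("facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True)
model = RagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever)

inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")
generated = model.generate(input_ids=inputs["input_ids"])
print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])
```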
transformers/docs/source/en/model_doc/rag.md/0
{ "file_path": "transformers/docs/source/en/model_doc/rag.md", "repo_id": "transformers", "token_count": 1273 }
258
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # SegGPT ## Overview The SegGPT model was proposed in [SegGPT: Segmenting Everything In Context](https://arxiv.org/abs/2304.03284) by Xinlong Wang, Xiaosong Zhang, Yue Cao, Wen Wang, Chunhua Shen, Tiejun Huang. SegGPT employs a decoder-only Transformer that can generate a segmentation mask given an input image, a prompt image and its corresponding prompt mask. The model achieves remarkable one-shot results with 56.1 mIoU on COCO-20 and 85.6 mIoU on FSS-1000. The abstract from the paper is the following: *We present SegGPT, a generalist model for segmenting everything in context. We unify various segmentation tasks into a generalist in-context learning framework that accommodates different kinds of segmentation data by transforming them into the same format of images. The training of SegGPT is formulated as an in-context coloring problem with random color mapping for each data sample. The objective is to accomplish diverse tasks according to the context, rather than relying on specific colors. After training, SegGPT can perform arbitrary segmentation tasks in images or videos via in-context inference, such as object instance, stuff, part, contour, and text. SegGPT is evaluated on a broad range of tasks, including few-shot semantic segmentation, video object segmentation, semantic segmentation, and panoptic segmentation. Our results show strong capabilities in segmenting in-domain and out-of* Tips: - One can use [`SegGptImageProcessor`] to prepare image input, prompt and mask to the model. - It's highly advisable to pass `num_labels` (not considering background) during preprocessing and postprocessing with [`SegGptImageProcessor`] for your use case. - When doing inference with [`SegGptForImageSegmentation`] if your `batch_size` is greater than 1 you can use feature ensemble across your images by passing `feature_ensemble=True` in the forward method. 
Here's how to use the model for one-shot semantic segmentation:

```python
import torch
from datasets import load_dataset
from transformers import SegGptImageProcessor, SegGptForImageSegmentation

model_id = "BAAI/seggpt-vit-large"
image_processor = SegGptImageProcessor.from_pretrained(model_id)
model = SegGptForImageSegmentation.from_pretrained(model_id)

dataset_id = "EduardoPacheco/FoodSeg103"
ds = load_dataset(dataset_id, split="train")
# Number of labels in FoodSeg103 (not including background)
num_labels = 103

image_input = ds[4]["image"]
ground_truth = ds[4]["label"]
image_prompt = ds[29]["image"]
mask_prompt = ds[29]["label"]

inputs = image_processor(
    images=image_input,
    prompt_images=image_prompt,
    prompt_masks=mask_prompt,
    num_labels=num_labels,
    return_tensors="pt"
)

with torch.no_grad():
    outputs = model(**inputs)

target_sizes = [image_input.size[::-1]]
mask = image_processor.post_process_semantic_segmentation(outputs, target_sizes, num_labels=num_labels)[0]
```

This model was contributed by [EduardoPacheco](https://huggingface.co/EduardoPacheco).
The original code can be found [here](https://github.com/baaivision/Painter/tree/main).

## SegGptConfig

[[autodoc]] SegGptConfig

## SegGptImageProcessor

[[autodoc]] SegGptImageProcessor
    - preprocess
    - post_process_semantic_segmentation

## SegGptModel

[[autodoc]] SegGptModel
    - forward

## SegGptForImageSegmentation

[[autodoc]] SegGptForImageSegmentation
    - forward
transformers/docs/source/en/model_doc/seggpt.md/0
{ "file_path": "transformers/docs/source/en/model_doc/seggpt.md", "repo_id": "transformers", "token_count": 1204 }
259
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # ViTDet ## Overview The ViTDet model was proposed in [Exploring Plain Vision Transformer Backbones for Object Detection](https://arxiv.org/abs/2203.16527) by Yanghao Li, Hanzi Mao, Ross Girshick, Kaiming He. VitDet leverages the plain [Vision Transformer](vit) for the task of object detection. The abstract from the paper is the following: *We explore the plain, non-hierarchical Vision Transformer (ViT) as a backbone network for object detection. This design enables the original ViT architecture to be fine-tuned for object detection without needing to redesign a hierarchical backbone for pre-training. With minimal adaptations for fine-tuning, our plain-backbone detector can achieve competitive results. Surprisingly, we observe: (i) it is sufficient to build a simple feature pyramid from a single-scale feature map (without the common FPN design) and (ii) it is sufficient to use window attention (without shifting) aided with very few cross-window propagation blocks. With plain ViT backbones pre-trained as Masked Autoencoders (MAE), our detector, named ViTDet, can compete with the previous leading methods that were all based on hierarchical backbones, reaching up to 61.3 AP_box on the COCO dataset using only ImageNet-1K pre-training. We hope our study will draw attention to research on plain-backbone detectors.* This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/facebookresearch/detectron2/tree/main/projects/ViTDet). Tips: - At the moment, only the backbone is available. ## VitDetConfig [[autodoc]] VitDetConfig ## VitDetModel [[autodoc]] VitDetModel - forward
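Since only the backbone is exposed, a minimal sketch with a randomly initialized model and dummy pixel values is enough to inspect the output feature map; the default configuration and the 224x224 input size below are illustrative choices.

```python
import torch
from transformers import VitDetConfig, VitDetModel

config = VitDetConfig()
model = VitDetModel(config)

# Dummy batch of one 224x224 RGB image
pixel_values = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    outputs = model(pixel_values)

# The backbone returns a spatial feature map of shape (batch_size, hidden_size, height, width)
print(outputs.last_hidden_state.shape)
```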
transformers/docs/source/en/model_doc/vitdet.md/0
{ "file_path": "transformers/docs/source/en/model_doc/vitdet.md", "repo_id": "transformers", "token_count": 579 }
260
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # XLM <div class="flex flex-wrap space-x-1"> <a href="https://huggingface.co/models?filter=xlm"> <img alt="Models" src="https://img.shields.io/badge/All_model_pages-xlm-blueviolet"> </a> <a href="https://huggingface.co/spaces/docs-demos/xlm-mlm-en-2048"> <img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"> </a> </div> ## Overview The XLM model was proposed in [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample, Alexis Conneau. It's a transformer pretrained using one of the following objectives: - a causal language modeling (CLM) objective (next token prediction), - a masked language modeling (MLM) objective (BERT-like), or - a Translation Language Modeling (TLM) object (extension of BERT's MLM to multiple language inputs) The abstract from the paper is the following: *Recent studies have demonstrated the efficiency of generative pretraining for English natural language understanding. In this work, we extend this approach to multiple languages and show the effectiveness of cross-lingual pretraining. We propose two methods to learn cross-lingual language models (XLMs): one unsupervised that only relies on monolingual data, and one supervised that leverages parallel data with a new cross-lingual language model objective. We obtain state-of-the-art results on cross-lingual classification, unsupervised and supervised machine translation. On XNLI, our approach pushes the state of the art by an absolute gain of 4.9% accuracy. On unsupervised machine translation, we obtain 34.3 BLEU on WMT'16 German-English, improving the previous state of the art by more than 9 BLEU. On supervised machine translation, we obtain a new state of the art of 38.5 BLEU on WMT'16 Romanian-English, outperforming the previous best approach by more than 4 BLEU. Our code and pretrained models will be made publicly available.* This model was contributed by [thomwolf](https://huggingface.co/thomwolf). The original code can be found [here](https://github.com/facebookresearch/XLM/). ## Usage tips - XLM has many different checkpoints, which were trained using different objectives: CLM, MLM or TLM. Make sure to select the correct objective for your task (e.g. MLM checkpoints are not suitable for generation). - XLM has multilingual checkpoints which leverage a specific `lang` parameter. Check out the [multi-lingual](../multilingual) page for more information. - A transformer model trained on several languages. There are three different type of training for this model and the library provides checkpoints for all of them: * Causal language modeling (CLM) which is the traditional autoregressive training (so this model could be in the previous section as well). 
One of the languages is selected for each training sample, and the model input is a sentence of 256 tokens, that may span over several documents in one of those languages. * Masked language modeling (MLM) which is like RoBERTa. One of the languages is selected for each training sample, and the model input is a sentence of 256 tokens, that may span over several documents in one of those languages, with dynamic masking of the tokens. * A combination of MLM and translation language modeling (TLM). This consists of concatenating a sentence in two different languages, with random masking. To predict one of the masked tokens, the model can use both, the surrounding context in language 1 and the context given by language 2. ## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) - [Question answering task guide](../tasks/question_answering) - [Causal language modeling task guide](../tasks/language_modeling) - [Masked language modeling task guide](../tasks/masked_language_modeling) - [Multiple choice task guide](../tasks/multiple_choice) ## XLMConfig [[autodoc]] XLMConfig ## XLMTokenizer [[autodoc]] XLMTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## XLM specific outputs [[autodoc]] models.xlm.modeling_xlm.XLMForQuestionAnsweringOutput <frameworkcontent> <pt> ## XLMModel [[autodoc]] XLMModel - forward ## XLMWithLMHeadModel [[autodoc]] XLMWithLMHeadModel - forward ## XLMForSequenceClassification [[autodoc]] XLMForSequenceClassification - forward ## XLMForMultipleChoice [[autodoc]] XLMForMultipleChoice - forward ## XLMForTokenClassification [[autodoc]] XLMForTokenClassification - forward ## XLMForQuestionAnsweringSimple [[autodoc]] XLMForQuestionAnsweringSimple - forward ## XLMForQuestionAnswering [[autodoc]] XLMForQuestionAnswering - forward </pt> <tf> ## TFXLMModel [[autodoc]] TFXLMModel - call ## TFXLMWithLMHeadModel [[autodoc]] TFXLMWithLMHeadModel - call ## TFXLMForSequenceClassification [[autodoc]] TFXLMForSequenceClassification - call ## TFXLMForMultipleChoice [[autodoc]] TFXLMForMultipleChoice - call ## TFXLMForTokenClassification [[autodoc]] TFXLMForTokenClassification - call ## TFXLMForQuestionAnsweringSimple [[autodoc]] TFXLMForQuestionAnsweringSimple - call </tf> </frameworkcontent>
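To illustrate the `lang`/`langs` input mentioned in the usage tips above, a minimal sketch could look like this; the `xlm-clm-enfr-1024` checkpoint and the prompt are assumptions chosen for illustration.

```python
import torch
from transformers import XLMTokenizer, XLMWithLMHeadModel

tokenizer = XLMTokenizer.from_pretrained("xlm-clm-enfr-1024")
model = XLMWithLMHeadModel.from_pretrained("xlm-clm-enfr-1024")

input_ids = torch.tensor([tokenizer.encode("Wikipedia was used to")])

# Build a `langs` tensor that repeats the language id for every input token
language_id = tokenizer.lang2id["en"]
langs = torch.full_like(input_ids, language_id)

outputs = model(input_ids, langs=langs)
print(outputs.logits.shape)
```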
transformers/docs/source/en/model_doc/xlm.md/0
{ "file_path": "transformers/docs/source/en/model_doc/xlm.md", "repo_id": "transformers", "token_count": 1744 }
261
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# GPU inference

GPUs are the standard choice of hardware for machine learning, unlike CPUs, because they are optimized for memory bandwidth and parallelism. To keep up with the larger sizes of modern models or to run these large models on existing and older hardware, there are several optimizations you can use to speed up GPU inference. In this guide, you'll learn how to use FlashAttention-2 (a more memory-efficient attention mechanism), BetterTransformer (a PyTorch native fastpath execution), and bitsandbytes to quantize your model to a lower precision. Finally, learn how to use 🤗 Optimum to accelerate inference with ONNX Runtime on Nvidia and AMD GPUs.

<Tip>

The majority of the optimizations described here also apply to multi-GPU setups!

</Tip>

## FlashAttention-2

<Tip>

FlashAttention-2 is experimental and may change considerably in future versions.

</Tip>

[FlashAttention-2](https://huggingface.co/papers/2205.14135) is a faster and more efficient implementation of the standard attention mechanism that can significantly speed up inference by:

1. additionally parallelizing the attention computation over sequence length
2.
partitioning the work between GPU threads to reduce communication and shared memory reads/writes between them FlashAttention-2 is currently supported for the following architectures: * [Bark](https://huggingface.co/docs/transformers/model_doc/bark#transformers.BarkModel) * [Bart](https://huggingface.co/docs/transformers/model_doc/bart#transformers.BartModel) * [Cohere](https://huggingface.co/docs/transformers/model_doc/cohere#transformers.CohereModel) * [DistilBert](https://huggingface.co/docs/transformers/model_doc/distilbert#transformers.DistilBertModel) * [Gemma](https://huggingface.co/docs/transformers/model_doc/gemma#transformers.GemmaModel) * [GPTBigCode](https://huggingface.co/docs/transformers/model_doc/gpt_bigcode#transformers.GPTBigCodeModel) * [GPTNeo](https://huggingface.co/docs/transformers/model_doc/gpt_neo#transformers.GPTNeoModel) * [GPTNeoX](https://huggingface.co/docs/transformers/model_doc/gpt_neox#transformers.GPTNeoXModel) * [GPT-J](https://huggingface.co/docs/transformers/model_doc/gptj#transformers.GPTJModel) * [Falcon](https://huggingface.co/docs/transformers/model_doc/falcon#transformers.FalconModel) * [Llama](https://huggingface.co/docs/transformers/model_doc/llama#transformers.LlamaModel) * [Llava](https://huggingface.co/docs/transformers/model_doc/llava) * [Llava-NeXT](https://huggingface.co/docs/transformers/model_doc/llava_next) * [VipLlava](https://huggingface.co/docs/transformers/model_doc/vipllava) * [MBart](https://huggingface.co/docs/transformers/model_doc/mbart#transformers.MBartModel) * [Mistral](https://huggingface.co/docs/transformers/model_doc/mistral#transformers.MistralModel) * [Mixtral](https://huggingface.co/docs/transformers/model_doc/mixtral#transformers.MixtralModel) * [OPT](https://huggingface.co/docs/transformers/model_doc/opt#transformers.OPTModel) * [Phi](https://huggingface.co/docs/transformers/model_doc/phi#transformers.PhiModel) * [StableLm](https://huggingface.co/docs/transformers/model_doc/stablelm#transformers.StableLmModel) * [Starcoder2](https://huggingface.co/docs/transformers/model_doc/starcoder2#transformers.Starcoder2Model) * [Qwen2](https://huggingface.co/docs/transformers/model_doc/qwen2#transformers.Qwen2Model) * [Whisper](https://huggingface.co/docs/transformers/model_doc/whisper#transformers.WhisperModel) You can request to add FlashAttention-2 support for another model by opening a GitHub Issue or Pull Request. Before you begin, make sure you have FlashAttention-2 installed. <hfoptions id="install"> <hfoption id="NVIDIA"> ```bash pip install flash-attn --no-build-isolation ``` We strongly suggest referring to the detailed [installation instructions](https://github.com/Dao-AILab/flash-attention?tab=readme-ov-file#installation-and-features) to learn more about supported hardware and data types! </hfoption> <hfoption id="AMD"> FlashAttention-2 is also supported on AMD GPUs and current support is limited to **Instinct MI210** and **Instinct MI250**. We strongly suggest using this [Dockerfile](https://github.com/huggingface/optimum-amd/tree/main/docker/transformers-pytorch-amd-gpu-flash/Dockerfile) to use FlashAttention-2 on AMD GPUs. 
</hfoption> </hfoptions> To enable FlashAttention-2, pass the argument `attn_implementation="flash_attention_2"` to [`~AutoModelForCausalLM.from_pretrained`]: ```python import torch from transformers import AutoModelForCausalLM, AutoTokenizer, LlamaForCausalLM model_id = "tiiuae/falcon-7b" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained( model_id, torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2", ) ``` <Tip> FlashAttention-2 can only be used when the model's dtype is `fp16` or `bf16`. Make sure to cast your model to the appropriate dtype and load them on a supported device before using FlashAttention-2. <br> You can also set `use_flash_attention_2=True` to enable FlashAttention-2 but it is deprecated in favor of `attn_implementation="flash_attention_2"`. </Tip> FlashAttention-2 can be combined with other optimization techniques like quantization to further speedup inference. For example, you can combine FlashAttention-2 with 8-bit or 4-bit quantization: ```py import torch from transformers import AutoModelForCausalLM, AutoTokenizer, LlamaForCausalLM model_id = "tiiuae/falcon-7b" tokenizer = AutoTokenizer.from_pretrained(model_id) # load in 8bit model = AutoModelForCausalLM.from_pretrained( model_id, load_in_8bit=True, attn_implementation="flash_attention_2", ) # load in 4bit model = AutoModelForCausalLM.from_pretrained( model_id, load_in_4bit=True, attn_implementation="flash_attention_2", ) ``` ### Expected speedups You can benefit from considerable speedups for inference, especially for inputs with long sequences. However, since FlashAttention-2 does not support computing attention scores with padding tokens, you must manually pad/unpad the attention scores for batched inference when the sequence contains padding tokens. This leads to a significant slowdown for batched generations with padding tokens. To overcome this, you should use FlashAttention-2 without padding tokens in the sequence during training (by packing a dataset or [concatenating sequences](https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_clm.py#L516) until reaching the maximum sequence length). For a single forward pass on [tiiuae/falcon-7b](https://hf.co/tiiuae/falcon-7b) with a sequence length of 4096 and various batch sizes without padding tokens, the expected speedup is: <div style="text-align: center"> <img src="https://huggingface.co/datasets/ybelkada/documentation-images/resolve/main/falcon-7b-inference-large-seqlen.png"> </div> For a single forward pass on [meta-llama/Llama-7b-hf](https://hf.co/meta-llama/Llama-7b-hf) with a sequence length of 4096 and various batch sizes without padding tokens, the expected speedup is: <div style="text-align: center"> <img src="https://huggingface.co/datasets/ybelkada/documentation-images/resolve/main/llama-7b-inference-large-seqlen.png"> </div> For sequences with padding tokens (generating with padding tokens), you need to unpad/pad the input sequences to correctly compute the attention scores. 
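As an illustration (this is a usage sketch, not an official benchmark script), batched generation with padding looks like the following: the tokenizer pads the shorter prompts and `generate` consumes the resulting `attention_mask`. The checkpoint, padding side, and pad token choice below are example assumptions:

```py
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "tiiuae/falcon-7b"
tokenizer = AutoTokenizer.from_pretrained(model_id, padding_side="left")
tokenizer.pad_token = tokenizer.eos_token  # assumption: this checkpoint defines no pad token

model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    attn_implementation="flash_attention_2",
    device_map="auto",
)

prompts = ["Deep learning is", "The theory of relativity states that"]
# padding=True produces an attention_mask that marks the padding tokens
inputs = tokenizer(prompts, padding=True, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
```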
With a relatively small sequence length, a single forward pass creates overhead leading to a small speedup (in the example below, 30% of the input is filled with padding tokens): <div style="text-align: center"> <img src="https://huggingface.co/datasets/ybelkada/documentation-images/resolve/main/llama-2-small-seqlen-padding.png"> </div> But for larger sequence lengths, you can expect even more speedup benefits: <Tip> FlashAttention is more memory efficient, meaning you can train on much larger sequence lengths without running into out-of-memory issues. You can potentially reduce memory usage up to 20x for larger sequence lengths. Take a look at the [flash-attention](https://github.com/Dao-AILab/flash-attention) repository for more details. </Tip> <div style="text-align: center"> <img src="https://huggingface.co/datasets/ybelkada/documentation-images/resolve/main/llama-2-large-seqlen-padding.png"> </div> ## PyTorch scaled dot product attention PyTorch's [`torch.nn.functional.scaled_dot_product_attention`](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention.html) (SDPA) can also call FlashAttention and memory-efficient attention kernels under the hood. SDPA support is currently being added natively in Transformers and is used by default for `torch>=2.1.1` when an implementation is available. For now, Transformers supports SDPA inference and training for the following architectures: * [Bart](https://huggingface.co/docs/transformers/model_doc/bart#transformers.BartModel) * [Cohere](https://huggingface.co/docs/transformers/model_doc/cohere#transformers.CohereModel) * [GPTBigCode](https://huggingface.co/docs/transformers/model_doc/gpt_bigcode#transformers.GPTBigCodeModel) * [Falcon](https://huggingface.co/docs/transformers/model_doc/falcon#transformers.FalconModel) * [Gemma](https://huggingface.co/docs/transformers/model_doc/gemma#transformers.GemmaModel) * [Llama](https://huggingface.co/docs/transformers/model_doc/llama#transformers.LlamaModel) * [Phi](https://huggingface.co/docs/transformers/model_doc/phi#transformers.PhiModel) * [Idefics](https://huggingface.co/docs/transformers/model_doc/idefics#transformers.IdeficsModel) * [Whisper](https://huggingface.co/docs/transformers/model_doc/whisper#transformers.WhisperModel) * [Mistral](https://huggingface.co/docs/transformers/model_doc/mistral#transformers.MistralModel) * [Mixtral](https://huggingface.co/docs/transformers/model_doc/mixtral#transformers.MixtralModel) * [StableLm](https://huggingface.co/docs/transformers/model_doc/stablelm#transformers.StableLmModel) * [Starcoder2](https://huggingface.co/docs/transformers/model_doc/starcoder2#transformers.Starcoder2Model) * [Qwen2](https://huggingface.co/docs/transformers/model_doc/qwen2#transformers.Qwen2Model) <Tip> FlashAttention can only be used for models with the `fp16` or `bf16` torch type, so make sure to cast your model to the appropriate type first. The memory-efficient attention backend is able to handle `fp32` models. 
</Tip> By default, SDPA selects the most performant kernel available but you can check whether a backend is available in a given setting (hardware, problem size) with [`torch.backends.cuda.sdp_kernel`](https://pytorch.org/docs/master/backends.html#torch.backends.cuda.sdp_kernel) as a context manager: ```diff import torch from transformers import AutoModelForCausalLM, AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", torch_dtype=torch.float16).to("cuda") # convert the model to BetterTransformer model.to_bettertransformer() input_text = "Hello my dog is cute and" inputs = tokenizer(input_text, return_tensors="pt").to("cuda") + with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False): outputs = model.generate(**inputs) print(tokenizer.decode(outputs[0], skip_special_tokens=True)) ``` If you see a bug with the traceback below, try using the nightly version of PyTorch which may have broader coverage for FlashAttention: ```bash RuntimeError: No available kernel. Aborting execution. # install PyTorch nightly pip3 install -U --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu118 ``` ## BetterTransformer <Tip warning={true}> Some BetterTransformer features are being upstreamed to Transformers with default support for native `torch.nn.scaled_dot_product_attention`. BetterTransformer still has a wider coverage than the Transformers SDPA integration, but you can expect more and more architectures to natively support SDPA in Transformers. </Tip> <Tip> Check out our benchmarks with BetterTransformer and scaled dot product attention in the [Out of the box acceleration and memory savings of 🤗 decoder models with PyTorch 2.0](https://pytorch.org/blog/out-of-the-box-acceleration/) and learn more about the fastpath execution in the [BetterTransformer](https://medium.com/pytorch/bettertransformer-out-of-the-box-performance-for-huggingface-transformers-3fbe27d50ab2) blog post. </Tip> BetterTransformer accelerates inference with its fastpath (native PyTorch specialized implementation of Transformer functions) execution. The two optimizations in the fastpath execution are: 1. fusion, which combines multiple sequential operations into a single "kernel" to reduce the number of computation steps 2. skipping the inherent sparsity of padding tokens to avoid unnecessary computation with nested tensors BetterTransformer also converts all attention operations to use the more memory-efficient [scaled dot product attention (SDPA)](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention), and it calls optimized kernels like [FlashAttention](https://huggingface.co/papers/2205.14135) under the hood. Before you start, make sure you have 🤗 Optimum [installed](https://huggingface.co/docs/optimum/installation). Then you can enable BetterTransformer with the [`PreTrainedModel.to_bettertransformer`] method: ```python model = model.to_bettertransformer() ``` You can return the original Transformers model with the [`~PreTrainedModel.reverse_bettertransformer`] method. You should use this before saving your model to use the canonical Transformers modeling: ```py model = model.reverse_bettertransformer() model.save_pretrained("saved_model") ``` ## bitsandbytes bitsandbytes is a quantization library that includes support for 4-bit and 8-bit quantization. 
Quantization reduces your model size compared to its native full precision version, making it easier to fit large models onto GPUs with limited memory.

Make sure you have bitsandbytes and 🤗 Accelerate installed:

```bash
# these versions support 8-bit and 4-bit
pip install bitsandbytes>=0.39.0 accelerate>=0.20.0

# install Transformers
pip install transformers
```

### 4-bit

To load a model in 4-bit for inference, use the `load_in_4bit` parameter. The `device_map` parameter is optional, but we recommend setting it to `"auto"` to allow 🤗 Accelerate to automatically and efficiently allocate the model given the available resources in the environment.

```py
from transformers import AutoModelForCausalLM

model_name = "bigscience/bloom-2b5"
model_4bit = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", load_in_4bit=True)
```

To load a model in 4-bit for inference with multiple GPUs, you can control how much GPU RAM you want to allocate to each GPU. For example, to distribute 600MB of memory to the first GPU and 1GB of memory to the second GPU:

```py
max_memory_mapping = {0: "600MB", 1: "1GB"}
model_name = "bigscience/bloom-3b"
model_4bit = AutoModelForCausalLM.from_pretrained(
    model_name, device_map="auto", load_in_4bit=True, max_memory=max_memory_mapping
)
```

### 8-bit

<Tip>

If you're curious and interested in learning more about the concepts underlying 8-bit quantization, read the [Gentle Introduction to 8-bit Matrix Multiplication for transformers at scale using Hugging Face Transformers, Accelerate and bitsandbytes](https://huggingface.co/blog/hf-bitsandbytes-integration) blog post.

</Tip>

To load a model in 8-bit for inference, use the `load_in_8bit` parameter. The `device_map` parameter is optional, but we recommend setting it to `"auto"` to allow 🤗 Accelerate to automatically and efficiently allocate the model given the available resources in the environment:

```py
from transformers import AutoModelForCausalLM

model_name = "bigscience/bloom-2b5"
model_8bit = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", load_in_8bit=True)
```

If you're loading a model in 8-bit for text generation, you should use the [`~transformers.GenerationMixin.generate`] method instead of the [`Pipeline`] function which is not optimized for 8-bit models and will be slower. Some sampling strategies, like nucleus sampling, are also not supported by the [`Pipeline`] for 8-bit models. You should also place all inputs on the same device as the model:

```py
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "bigscience/bloom-2b5"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model_8bit = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", load_in_8bit=True)

prompt = "Hello, my llama is cute"
inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
generated_ids = model_8bit.generate(**inputs)
outputs = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
```

To load a model in 8-bit for inference with multiple GPUs, you can control how much GPU RAM you want to allocate to each GPU.
For example, to distribute 1GB of memory to the first GPU and 2GB of memory to the second GPU: ```py max_memory_mapping = {0: "1GB", 1: "2GB"} model_name = "bigscience/bloom-3b" model_8bit = AutoModelForCausalLM.from_pretrained( model_name, device_map="auto", load_in_8bit=True, max_memory=max_memory_mapping ) ``` <Tip> Feel free to try running a 11 billion parameter [T5 model](https://colab.research.google.com/drive/1YORPWx4okIHXnjW7MSAidXN29mPVNT7F?usp=sharing) or the 3 billion parameter [BLOOM model](https://colab.research.google.com/drive/1qOjXfQIAULfKvZqwCen8-MoWKGdSatZ4?usp=sharing) for inference on Google Colab's free tier GPUs! </Tip> ## 🤗 Optimum <Tip> Learn more details about using ORT with 🤗 Optimum in the [Accelerated inference on NVIDIA GPUs](https://huggingface.co/docs/optimum/onnxruntime/usage_guides/gpu#accelerated-inference-on-nvidia-gpus) and [Accelerated inference on AMD GPUs](https://huggingface.co/docs/optimum/onnxruntime/usage_guides/amdgpu#accelerated-inference-on-amd-gpus) guides. This section only provides a brief and simple example. </Tip> ONNX Runtime (ORT) is a model accelerator that supports accelerated inference on Nvidia GPUs, and AMD GPUs that use [ROCm](https://www.amd.com/en/products/software/rocm.html) stack. ORT uses optimization techniques like fusing common operations into a single node and constant folding to reduce the number of computations performed and speedup inference. ORT also places the most computationally intensive operations on the GPU and the rest on the CPU to intelligently distribute the workload between the two devices. ORT is supported by 🤗 Optimum which can be used in 🤗 Transformers. You'll need to use an [`~optimum.onnxruntime.ORTModel`] for the task you're solving, and specify the `provider` parameter which can be set to either [`CUDAExecutionProvider`](https://huggingface.co/docs/optimum/onnxruntime/usage_guides/gpu#cudaexecutionprovider), [`ROCMExecutionProvider`](https://huggingface.co/docs/optimum/onnxruntime/usage_guides/amdgpu) or [`TensorrtExecutionProvider`](https://huggingface.co/docs/optimum/onnxruntime/usage_guides/gpu#tensorrtexecutionprovider). If you want to load a model that was not yet exported to ONNX, you can set `export=True` to convert your model on-the-fly to the ONNX format: ```py from optimum.onnxruntime import ORTModelForSequenceClassification ort_model = ORTModelForSequenceClassification.from_pretrained( "distilbert/distilbert-base-uncased-finetuned-sst-2-english", export=True, provider="CUDAExecutionProvider", ) ``` Now you're free to use the model for inference: ```py from optimum.pipelines import pipeline from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased-finetuned-sst-2-english") pipeline = pipeline(task="text-classification", model=ort_model, tokenizer=tokenizer, device="cuda:0") result = pipeline("Both the music and visual were astounding, not to mention the actors performance.") ``` ## Combine optimizations It is often possible to combine several of the optimization techniques described above to get the best inference performance possible for your model. 
For example, you can load a model in 4-bit, and then enable BetterTransformer with FlashAttention: ```py import torch from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig # load model in 4-bit quantization_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16 ) tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", quantization_config=quantization_config) # enable BetterTransformer model = model.to_bettertransformer() input_text = "Hello my dog is cute and" inputs = tokenizer(input_text, return_tensors="pt").to("cuda") # enable FlashAttention with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False): outputs = model.generate(**inputs) print(tokenizer.decode(outputs[0], skip_special_tokens=True)) ```
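If you want a rough idea of what these combined optimizations buy you on your own hardware, a quick timing sketch like the one below can help. It is illustrative only: the helper function is not part of the Transformers API, and serious comparisons should use more iterations and a dedicated benchmarking tool:

```py
import time

import torch


def average_generation_time(model, inputs, n_iters=5, max_new_tokens=64):
    # warmup run so CUDA kernels and caches are initialized before timing
    model.generate(**inputs, max_new_tokens=max_new_tokens)
    torch.cuda.synchronize()
    start = time.perf_counter()
    for _ in range(n_iters):
        model.generate(**inputs, max_new_tokens=max_new_tokens)
    torch.cuda.synchronize()
    return (time.perf_counter() - start) / n_iters


# reuses `model` and `inputs` from the example above
print(f"average generation time: {average_generation_time(model, inputs):.2f}s")
```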
transformers/docs/source/en/perf_infer_gpu_one.md/0
{ "file_path": "transformers/docs/source/en/perf_infer_gpu_one.md", "repo_id": "transformers", "token_count": 6737 }
262
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Quick tour [[open-in-colab]] Get up and running with 🤗 Transformers! Whether you're a developer or an everyday user, this quick tour will help you get started and show you how to use the [`pipeline`] for inference, load a pretrained model and preprocessor with an [AutoClass](./model_doc/auto), and quickly train a model with PyTorch or TensorFlow. If you're a beginner, we recommend checking out our tutorials or [course](https://huggingface.co/course/chapter1/1) next for more in-depth explanations of the concepts introduced here. Before you begin, make sure you have all the necessary libraries installed: ```bash !pip install transformers datasets ``` You'll also need to install your preferred machine learning framework: <frameworkcontent> <pt> ```bash pip install torch ``` </pt> <tf> ```bash pip install tensorflow ``` </tf> </frameworkcontent> ## Pipeline <Youtube id="tiZFewofSLM"/> The [`pipeline`] is the easiest and fastest way to use a pretrained model for inference. You can use the [`pipeline`] out-of-the-box for many tasks across different modalities, some of which are shown in the table below: <Tip> For a complete list of available tasks, check out the [pipeline API reference](./main_classes/pipelines). 
</Tip> | **Task** | **Description** | **Modality** | **Pipeline identifier** | |------------------------------|--------------------------------------------------------------------------------------------------------------|-----------------|-----------------------------------------------| | Text classification | assign a label to a given sequence of text | NLP | pipeline(task=“sentiment-analysis”) | | Text generation | generate text given a prompt | NLP | pipeline(task=“text-generation”) | | Summarization | generate a summary of a sequence of text or document | NLP | pipeline(task=“summarization”) | | Image classification | assign a label to an image | Computer vision | pipeline(task=“image-classification”) | | Image segmentation | assign a label to each individual pixel of an image (supports semantic, panoptic, and instance segmentation) | Computer vision | pipeline(task=“image-segmentation”) | | Object detection | predict the bounding boxes and classes of objects in an image | Computer vision | pipeline(task=“object-detection”) | | Audio classification | assign a label to some audio data | Audio | pipeline(task=“audio-classification”) | | Automatic speech recognition | transcribe speech into text | Audio | pipeline(task=“automatic-speech-recognition”) | | Visual question answering | answer a question about the image, given an image and a question | Multimodal | pipeline(task=“vqa”) | | Document question answering | answer a question about the document, given a document and a question | Multimodal | pipeline(task="document-question-answering") | | Image captioning | generate a caption for a given image | Multimodal | pipeline(task="image-to-text") | Start by creating an instance of [`pipeline`] and specifying a task you want to use it for. In this guide, you'll use the [`pipeline`] for sentiment analysis as an example: ```py >>> from transformers import pipeline >>> classifier = pipeline("sentiment-analysis") ``` The [`pipeline`] downloads and caches a default [pretrained model](https://huggingface.co/distilbert/distilbert-base-uncased-finetuned-sst-2-english) and tokenizer for sentiment analysis. Now you can use the `classifier` on your target text: ```py >>> classifier("We are very happy to show you the 🤗 Transformers library.") [{'label': 'POSITIVE', 'score': 0.9998}] ``` If you have more than one input, pass your inputs as a list to the [`pipeline`] to return a list of dictionaries: ```py >>> results = classifier(["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."]) >>> for result in results: ... print(f"label: {result['label']}, with score: {round(result['score'], 4)}") label: POSITIVE, with score: 0.9998 label: NEGATIVE, with score: 0.5309 ``` The [`pipeline`] can also iterate over an entire dataset for any task you like. For this example, let's choose automatic speech recognition as our task: ```py >>> import torch >>> from transformers import pipeline >>> speech_recognizer = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h") ``` Load an audio dataset (see the 🤗 Datasets [Quick Start](https://huggingface.co/docs/datasets/quickstart#audio) for more details) you'd like to iterate over. 
For example, load the [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset: ```py >>> from datasets import load_dataset, Audio >>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train") # doctest: +IGNORE_RESULT ``` You need to make sure the sampling rate of the dataset matches the sampling rate [`facebook/wav2vec2-base-960h`](https://huggingface.co/facebook/wav2vec2-base-960h) was trained on: ```py >>> dataset = dataset.cast_column("audio", Audio(sampling_rate=speech_recognizer.feature_extractor.sampling_rate)) ``` The audio files are automatically loaded and resampled when calling the `"audio"` column. Extract the raw waveform arrays from the first 4 samples and pass it as a list to the pipeline: ```py >>> result = speech_recognizer(dataset[:4]["audio"]) >>> print([d["text"] for d in result]) ['I WOULD LIKE TO SET UP A JOINT ACCOUNT WITH MY PARTNER HOW DO I PROCEED WITH DOING THAT', "FONDERING HOW I'D SET UP A JOIN TO HELL T WITH MY WIFE AND WHERE THE AP MIGHT BE", "I I'D LIKE TOY SET UP A JOINT ACCOUNT WITH MY PARTNER I'M NOT SEEING THE OPTION TO DO IT ON THE APSO I CALLED IN TO GET SOME HELP CAN I JUST DO IT OVER THE PHONE WITH YOU AND GIVE YOU THE INFORMATION OR SHOULD I DO IT IN THE AP AN I'M MISSING SOMETHING UQUETTE HAD PREFERRED TO JUST DO IT OVER THE PHONE OF POSSIBLE THINGS", 'HOW DO I FURN A JOINA COUT'] ``` For larger datasets where the inputs are big (like in speech or vision), you'll want to pass a generator instead of a list to load all the inputs in memory. Take a look at the [pipeline API reference](./main_classes/pipelines) for more information. ### Use another model and tokenizer in the pipeline The [`pipeline`] can accommodate any model from the [Hub](https://huggingface.co/models), making it easy to adapt the [`pipeline`] for other use-cases. For example, if you'd like a model capable of handling French text, use the tags on the Hub to filter for an appropriate model. The top filtered result returns a multilingual [BERT model](https://huggingface.co/nlptown/bert-base-multilingual-uncased-sentiment) finetuned for sentiment analysis you can use for French text: ```py >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" ``` <frameworkcontent> <pt> Use [`AutoModelForSequenceClassification`] and [`AutoTokenizer`] to load the pretrained model and it's associated tokenizer (more on an `AutoClass` in the next section): ```py >>> from transformers import AutoTokenizer, AutoModelForSequenceClassification >>> model = AutoModelForSequenceClassification.from_pretrained(model_name) >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` </pt> <tf> Use [`TFAutoModelForSequenceClassification`] and [`AutoTokenizer`] to load the pretrained model and it's associated tokenizer (more on an `TFAutoClass` in the next section): ```py >>> from transformers import AutoTokenizer, TFAutoModelForSequenceClassification >>> model = TFAutoModelForSequenceClassification.from_pretrained(model_name) >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` </tf> </frameworkcontent> Specify the model and tokenizer in the [`pipeline`], and now you can apply the `classifier` on French text: ```py >>> classifier = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer) >>> classifier("Nous sommes très heureux de vous présenter la bibliothèque 🤗 Transformers.") [{'label': '5 stars', 'score': 0.7273}] ``` If you can't find a model for your use-case, you'll need to finetune a pretrained model on your data. 
Take a look at our [finetuning tutorial](./training) to learn how. Finally, after you've finetuned your pretrained model, please consider [sharing](./model_sharing) the model with the community on the Hub to democratize machine learning for everyone! 🤗 ## AutoClass <Youtube id="AhChOFRegn4"/> Under the hood, the [`AutoModelForSequenceClassification`] and [`AutoTokenizer`] classes work together to power the [`pipeline`] you used above. An [AutoClass](./model_doc/auto) is a shortcut that automatically retrieves the architecture of a pretrained model from its name or path. You only need to select the appropriate `AutoClass` for your task and it's associated preprocessing class. Let's return to the example from the previous section and see how you can use the `AutoClass` to replicate the results of the [`pipeline`]. ### AutoTokenizer A tokenizer is responsible for preprocessing text into an array of numbers as inputs to a model. There are multiple rules that govern the tokenization process, including how to split a word and at what level words should be split (learn more about tokenization in the [tokenizer summary](./tokenizer_summary)). The most important thing to remember is you need to instantiate a tokenizer with the same model name to ensure you're using the same tokenization rules a model was pretrained with. Load a tokenizer with [`AutoTokenizer`]: ```py >>> from transformers import AutoTokenizer >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` Pass your text to the tokenizer: ```py >>> encoding = tokenizer("We are very happy to show you the 🤗 Transformers library.") >>> print(encoding) {'input_ids': [101, 11312, 10320, 12495, 19308, 10114, 11391, 10855, 10103, 100, 58263, 13299, 119, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} ``` The tokenizer returns a dictionary containing: * [input_ids](./glossary#input-ids): numerical representations of your tokens. * [attention_mask](.glossary#attention-mask): indicates which tokens should be attended to. A tokenizer can also accept a list of inputs, and pad and truncate the text to return a batch with uniform length: <frameworkcontent> <pt> ```py >>> pt_batch = tokenizer( ... ["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."], ... padding=True, ... truncation=True, ... max_length=512, ... return_tensors="pt", ... ) ``` </pt> <tf> ```py >>> tf_batch = tokenizer( ... ["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."], ... padding=True, ... truncation=True, ... max_length=512, ... return_tensors="tf", ... ) ``` </tf> </frameworkcontent> <Tip> Check out the [preprocess](./preprocessing) tutorial for more details about tokenization, and how to use an [`AutoImageProcessor`], [`AutoFeatureExtractor`] and [`AutoProcessor`] to preprocess image, audio, and multimodal inputs. </Tip> ### AutoModel <frameworkcontent> <pt> 🤗 Transformers provides a simple and unified way to load pretrained instances. This means you can load an [`AutoModel`] like you would load an [`AutoTokenizer`]. The only difference is selecting the correct [`AutoModel`] for the task. 
For text (or sequence) classification, you should load [`AutoModelForSequenceClassification`]: ```py >>> from transformers import AutoModelForSequenceClassification >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> pt_model = AutoModelForSequenceClassification.from_pretrained(model_name) ``` <Tip> See the [task summary](./task_summary) for tasks supported by an [`AutoModel`] class. </Tip> Now pass your preprocessed batch of inputs directly to the model. You just have to unpack the dictionary by adding `**`: ```py >>> pt_outputs = pt_model(**pt_batch) ``` The model outputs the final activations in the `logits` attribute. Apply the softmax function to the `logits` to retrieve the probabilities: ```py >>> from torch import nn >>> pt_predictions = nn.functional.softmax(pt_outputs.logits, dim=-1) >>> print(pt_predictions) tensor([[0.0021, 0.0018, 0.0115, 0.2121, 0.7725], [0.2084, 0.1826, 0.1969, 0.1755, 0.2365]], grad_fn=<SoftmaxBackward0>) ``` </pt> <tf> 🤗 Transformers provides a simple and unified way to load pretrained instances. This means you can load an [`TFAutoModel`] like you would load an [`AutoTokenizer`]. The only difference is selecting the correct [`TFAutoModel`] for the task. For text (or sequence) classification, you should load [`TFAutoModelForSequenceClassification`]: ```py >>> from transformers import TFAutoModelForSequenceClassification >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(model_name) ``` <Tip> See the [task summary](./task_summary) for tasks supported by an [`AutoModel`] class. </Tip> Now pass your preprocessed batch of inputs directly to the model. You can pass the tensors as-is: ```py >>> tf_outputs = tf_model(tf_batch) ``` The model outputs the final activations in the `logits` attribute. Apply the softmax function to the `logits` to retrieve the probabilities: ```py >>> import tensorflow as tf >>> tf_predictions = tf.nn.softmax(tf_outputs.logits, axis=-1) >>> tf_predictions # doctest: +IGNORE_RESULT ``` </tf> </frameworkcontent> <Tip> All 🤗 Transformers models (PyTorch or TensorFlow) output the tensors *before* the final activation function (like softmax) because the final activation function is often fused with the loss. Model outputs are special dataclasses so their attributes are autocompleted in an IDE. The model outputs behave like a tuple or a dictionary (you can index with an integer, a slice or a string) in which case, attributes that are None are ignored. 
</Tip> ### Save a model <frameworkcontent> <pt> Once your model is fine-tuned, you can save it with its tokenizer using [`PreTrainedModel.save_pretrained`]: ```py >>> pt_save_directory = "./pt_save_pretrained" >>> tokenizer.save_pretrained(pt_save_directory) # doctest: +IGNORE_RESULT >>> pt_model.save_pretrained(pt_save_directory) ``` When you are ready to use the model again, reload it with [`PreTrainedModel.from_pretrained`]: ```py >>> pt_model = AutoModelForSequenceClassification.from_pretrained("./pt_save_pretrained") ``` </pt> <tf> Once your model is fine-tuned, you can save it with its tokenizer using [`TFPreTrainedModel.save_pretrained`]: ```py >>> tf_save_directory = "./tf_save_pretrained" >>> tokenizer.save_pretrained(tf_save_directory) # doctest: +IGNORE_RESULT >>> tf_model.save_pretrained(tf_save_directory) ``` When you are ready to use the model again, reload it with [`TFPreTrainedModel.from_pretrained`]: ```py >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained("./tf_save_pretrained") ``` </tf> </frameworkcontent> One particularly cool 🤗 Transformers feature is the ability to save a model and reload it as either a PyTorch or TensorFlow model. The `from_pt` or `from_tf` parameter can convert the model from one framework to the other: <frameworkcontent> <pt> ```py >>> from transformers import AutoModel >>> tokenizer = AutoTokenizer.from_pretrained(tf_save_directory) >>> pt_model = AutoModelForSequenceClassification.from_pretrained(tf_save_directory, from_tf=True) ``` </pt> <tf> ```py >>> from transformers import TFAutoModel >>> tokenizer = AutoTokenizer.from_pretrained(pt_save_directory) >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(pt_save_directory, from_pt=True) ``` </tf> </frameworkcontent> ## Custom model builds You can modify the model's configuration class to change how a model is built. The configuration specifies a model's attributes, such as the number of hidden layers or attention heads. You start from scratch when you initialize a model from a custom configuration class. The model attributes are randomly initialized, and you'll need to train the model before you can use it to get meaningful results. Start by importing [`AutoConfig`], and then load the pretrained model you want to modify. Within [`AutoConfig.from_pretrained`], you can specify the attribute you want to change, such as the number of attention heads: ```py >>> from transformers import AutoConfig >>> my_config = AutoConfig.from_pretrained("distilbert/distilbert-base-uncased", n_heads=12) ``` <frameworkcontent> <pt> Create a model from your custom configuration with [`AutoModel.from_config`]: ```py >>> from transformers import AutoModel >>> my_model = AutoModel.from_config(my_config) ``` </pt> <tf> Create a model from your custom configuration with [`TFAutoModel.from_config`]: ```py >>> from transformers import TFAutoModel >>> my_model = TFAutoModel.from_config(my_config) ``` </tf> </frameworkcontent> Take a look at the [Create a custom architecture](./create_a_model) guide for more information about building custom configurations. ## Trainer - a PyTorch optimized training loop All models are a standard [`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) so you can use them in any typical training loop. While you can write your own training loop, 🤗 Transformers provides a [`Trainer`] class for PyTorch, which contains the basic training loop and adds additional functionality for features like distributed training, mixed precision, and more. 
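To make the "standard `torch.nn.Module`" point concrete, here is a minimal single-batch sketch of such a manual loop (illustrative only; the checkpoint and toy labels are arbitrary choices). [`Trainer`] wraps this pattern and layers batching, evaluation, checkpointing, and distributed training on top:

```py
>>> import torch
>>> from torch.optim import AdamW
>>> from transformers import AutoModelForSequenceClassification, AutoTokenizer

>>> model = AutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased")
>>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased")
>>> optimizer = AdamW(model.parameters(), lr=2e-5)

>>> batch = tokenizer(["I love this!", "This is terrible."], padding=True, return_tensors="pt")
>>> labels = torch.tensor([1, 0])

>>> model.train()  # doctest: +IGNORE_RESULT
>>> outputs = model(**batch, labels=labels)  # the model computes the loss when labels are passed
>>> outputs.loss.backward()
>>> optimizer.step()
>>> optimizer.zero_grad()
```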
Depending on your task, you'll typically pass the following parameters to [`Trainer`]: 1. You'll start with a [`PreTrainedModel`] or a [`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module): ```py >>> from transformers import AutoModelForSequenceClassification >>> model = AutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` 2. [`TrainingArguments`] contains the model hyperparameters you can change like learning rate, batch size, and the number of epochs to train for. The default values are used if you don't specify any training arguments: ```py >>> from transformers import TrainingArguments >>> training_args = TrainingArguments( ... output_dir="path/to/save/folder/", ... learning_rate=2e-5, ... per_device_train_batch_size=8, ... per_device_eval_batch_size=8, ... num_train_epochs=2, ... ) ``` 3. Load a preprocessing class like a tokenizer, image processor, feature extractor, or processor: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased") ``` 4. Load a dataset: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("rotten_tomatoes") # doctest: +IGNORE_RESULT ``` 5. Create a function to tokenize the dataset: ```py >>> def tokenize_dataset(dataset): ... return tokenizer(dataset["text"]) ``` Then apply it over the entire dataset with [`~datasets.Dataset.map`]: ```py >>> dataset = dataset.map(tokenize_dataset, batched=True) ``` 6. A [`DataCollatorWithPadding`] to create a batch of examples from your dataset: ```py >>> from transformers import DataCollatorWithPadding >>> data_collator = DataCollatorWithPadding(tokenizer=tokenizer) ``` Now gather all these classes in [`Trainer`]: ```py >>> from transformers import Trainer >>> trainer = Trainer( ... model=model, ... args=training_args, ... train_dataset=dataset["train"], ... eval_dataset=dataset["test"], ... tokenizer=tokenizer, ... data_collator=data_collator, ... ) # doctest: +SKIP ``` When you're ready, call [`~Trainer.train`] to start training: ```py >>> trainer.train() # doctest: +SKIP ``` <Tip> For tasks - like translation or summarization - that use a sequence-to-sequence model, use the [`Seq2SeqTrainer`] and [`Seq2SeqTrainingArguments`] classes instead. </Tip> You can customize the training loop behavior by subclassing the methods inside [`Trainer`]. This allows you to customize features such as the loss function, optimizer, and scheduler. Take a look at the [`Trainer`] reference for which methods can be subclassed. The other way to customize the training loop is by using [Callbacks](./main_classes/callbacks). You can use callbacks to integrate with other libraries and inspect the training loop to report on progress or stop the training early. Callbacks do not modify anything in the training loop itself. To customize something like the loss function, you need to subclass the [`Trainer`] instead. ## Train with TensorFlow All models are a standard [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) so they can be trained in TensorFlow with the [Keras](https://keras.io/) API. 🤗 Transformers provides the [`~TFPreTrainedModel.prepare_tf_dataset`] method to easily load your dataset as a `tf.data.Dataset` so you can start training right away with Keras' [`compile`](https://keras.io/api/models/model_training_apis/#compile-method) and [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) methods. 1. 
You'll start with a [`TFPreTrainedModel`] or a [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model): ```py >>> from transformers import TFAutoModelForSequenceClassification >>> model = TFAutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` 2. Load a preprocessing class like a tokenizer, image processor, feature extractor, or processor: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased") ``` 3. Create a function to tokenize the dataset: ```py >>> def tokenize_dataset(dataset): ... return tokenizer(dataset["text"]) # doctest: +SKIP ``` 4. Apply the tokenizer over the entire dataset with [`~datasets.Dataset.map`] and then pass the dataset and tokenizer to [`~TFPreTrainedModel.prepare_tf_dataset`]. You can also change the batch size and shuffle the dataset here if you'd like: ```py >>> dataset = dataset.map(tokenize_dataset) # doctest: +SKIP >>> tf_dataset = model.prepare_tf_dataset( ... dataset["train"], batch_size=16, shuffle=True, tokenizer=tokenizer ... ) # doctest: +SKIP ``` 5. When you're ready, you can call `compile` and `fit` to start training. Note that Transformers models all have a default task-relevant loss function, so you don't need to specify one unless you want to: ```py >>> from tensorflow.keras.optimizers import Adam >>> model.compile(optimizer=Adam(3e-5)) # No loss argument! >>> model.fit(tf_dataset) # doctest: +SKIP ``` ## What's next? Now that you've completed the 🤗 Transformers quick tour, check out our guides and learn how to do more specific things like writing a custom model, fine-tuning a model for a task, and how to train a model with a script. If you're interested in learning more about 🤗 Transformers core concepts, grab a cup of coffee and take a look at our Conceptual Guides!
transformers/docs/source/en/quicktour.md/0
{ "file_path": "transformers/docs/source/en/quicktour.md", "repo_id": "transformers", "token_count": 8300 }
263
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # How 🤗 Transformers solve tasks In [What 🤗 Transformers can do](task_summary), you learned about natural language processing (NLP), speech and audio, computer vision tasks, and some important applications of them. This page will look closely at how models solve these tasks and explain what's happening under the hood. There are many ways to solve a given task, some models may implement certain techniques or even approach the task from a new angle, but for Transformer models, the general idea is the same. Owing to its flexible architecture, most models are a variant of an encoder, decoder, or encoder-decoder structure. In addition to Transformer models, our library also has several convolutional neural networks (CNNs), which are still used today for computer vision tasks. We'll also explain how a modern CNN works. To explain how tasks are solved, we'll walk through what goes on inside the model to output useful predictions. - [Wav2Vec2](model_doc/wav2vec2) for audio classification and automatic speech recognition (ASR) - [Vision Transformer (ViT)](model_doc/vit) and [ConvNeXT](model_doc/convnext) for image classification - [DETR](model_doc/detr) for object detection - [Mask2Former](model_doc/mask2former) for image segmentation - [GLPN](model_doc/glpn) for depth estimation - [BERT](model_doc/bert) for NLP tasks like text classification, token classification and question answering that use an encoder - [GPT2](model_doc/gpt2) for NLP tasks like text generation that use a decoder - [BART](model_doc/bart) for NLP tasks like summarization and translation that use an encoder-decoder <Tip> Before you go further, it is good to have some basic knowledge of the original Transformer architecture. Knowing how encoders, decoders, and attention work will aid you in understanding how different Transformer models work. If you're just getting started or need a refresher, check out our [course](https://huggingface.co/course/chapter1/4?fw=pt) for more information! </Tip> ## Speech and audio [Wav2Vec2](model_doc/wav2vec2) is a self-supervised model pretrained on unlabeled speech data and finetuned on labeled data for audio classification and automatic speech recognition. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/wav2vec2_architecture.png"/> </div> This model has four main components: 1. A *feature encoder* takes the raw audio waveform, normalizes it to zero mean and unit variance, and converts it into a sequence of feature vectors that are each 20ms long. 2. Waveforms are continuous by nature, so they can't be divided into separate units like a sequence of text can be split into words. That's why the feature vectors are passed to a *quantization module*, which aims to learn discrete speech units. 
The speech unit is chosen from a collection of codewords, known as a *codebook* (you can think of this as the vocabulary). From the codebook, the vector or speech unit, that best represents the continuous audio input is chosen and forwarded through the model. 3. About half of the feature vectors are randomly masked, and the masked feature vector is fed to a *context network*, which is a Transformer encoder that also adds relative positional embeddings. 4. The pretraining objective of the context network is a *contrastive task*. The model has to predict the true quantized speech representation of the masked prediction from a set of false ones, encouraging the model to find the most similar context vector and quantized speech unit (the target label). Now that wav2vec2 is pretrained, you can finetune it on your data for audio classification or automatic speech recognition! ### Audio classification To use the pretrained model for audio classification, add a sequence classification head on top of the base Wav2Vec2 model. The classification head is a linear layer that accepts the encoder's hidden states. The hidden states represent the learned features from each audio frame which can have varying lengths. To create one vector of fixed-length, the hidden states are pooled first and then transformed into logits over the class labels. The cross-entropy loss is calculated between the logits and target to find the most likely class. Ready to try your hand at audio classification? Check out our complete [audio classification guide](tasks/audio_classification) to learn how to finetune Wav2Vec2 and use it for inference! ### Automatic speech recognition To use the pretrained model for automatic speech recognition, add a language modeling head on top of the base Wav2Vec2 model for [connectionist temporal classification (CTC)](glossary#connectionist-temporal-classification-ctc). The language modeling head is a linear layer that accepts the encoder's hidden states and transforms them into logits. Each logit represents a token class (the number of tokens comes from the task vocabulary). The CTC loss is calculated between the logits and targets to find the most likely sequence of tokens, which are then decoded into a transcription. Ready to try your hand at automatic speech recognition? Check out our complete [automatic speech recognition guide](tasks/asr) to learn how to finetune Wav2Vec2 and use it for inference! ## Computer vision There are two ways to approach computer vision tasks: 1. Split an image into a sequence of patches and process them in parallel with a Transformer. 2. Use a modern CNN, like [ConvNeXT](model_doc/convnext), which relies on convolutional layers but adopts modern network designs. <Tip> A third approach mixes Transformers with convolutions (for example, [Convolutional Vision Transformer](model_doc/cvt) or [LeViT](model_doc/levit)). We won't discuss those because they just combine the two approaches we examine here. </Tip> ViT and ConvNeXT are commonly used for image classification, but for other vision tasks like object detection, segmentation, and depth estimation, we'll look at DETR, Mask2Former and GLPN, respectively; these models are better suited for those tasks. ### Image classification ViT and ConvNeXT can both be used for image classification; the main difference is that ViT uses an attention mechanism while ConvNeXT uses convolutions. #### Transformer [ViT](model_doc/vit) replaces convolutions entirely with a pure Transformer architecture. 
If you're familiar with the original Transformer, then you're already most of the way toward understanding ViT. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/vit_architecture.jpg"/> </div> The main change ViT introduced was in how images are fed to a Transformer: 1. An image is split into square non-overlapping patches, each of which gets turned into a vector or *patch embedding*. The patch embeddings are generated from a convolutional 2D layer which creates the proper input dimensions (which for a base Transformer is 768 values for each patch embedding). If you had a 224x224 pixel image, you could split it into 196 16x16 image patches. Just like how text is tokenized into words, an image is "tokenized" into a sequence of patches. 2. A *learnable embedding* - a special `[CLS]` token - is added to the beginning of the patch embeddings just like BERT. The final hidden state of the `[CLS]` token is used as the input to the attached classification head; other outputs are ignored. This token helps the model learn how to encode a representation of the image. 3. The last thing to add to the patch and learnable embeddings are the *position embeddings* because the model doesn't know how the image patches are ordered. The position embeddings are also learnable and have the same size as the patch embeddings. Finally, all of the embeddings are passed to the Transformer encoder. 4. The output, specifically only the output with the `[CLS]` token, is passed to a multilayer perceptron head (MLP). ViT's pretraining objective is simply classification. Like other classification heads, the MLP head converts the output into logits over the class labels and calculates the cross-entropy loss to find the most likely class. Ready to try your hand at image classification? Check out our complete [image classification guide](tasks/image_classification) to learn how to finetune ViT and use it for inference! #### CNN <Tip> This section briefly explains convolutions, but it'd be helpful to have a prior understanding of how they change an image's shape and size. If you're unfamiliar with convolutions, check out the [Convolution Neural Networks chapter](https://github.com/fastai/fastbook/blob/master/13_convolutions.ipynb) from the fastai book! </Tip> [ConvNeXT](model_doc/convnext) is a CNN architecture that adopts new and modern network designs to improve performance. However, convolutions are still at the core of the model. From a high-level perspective, a [convolution](glossary#convolution) is an operation where a smaller matrix (*kernel*) is multiplied by a small window of the image pixels. It computes some features from it, such as a particular texture or curvature of a line. Then it slides over to the next window of pixels; the distance the convolution travels is known as the *stride*. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/convolution.gif"/> </div> <small>A basic convolution without padding or stride, taken from <a href="https://arxiv.org/abs/1603.07285">A guide to convolution arithmetic for deep learning.</a></small> You can feed this output to another convolutional layer, and with each successive layer, the network learns more complex and abstract things like hotdogs or rockets. 
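For intuition about kernels and strides, the snippet below (purely illustrative, not taken from the ConvNeXT implementation) shows how a single PyTorch convolution changes the shape of its input. It uses a 4x4 kernel moved 4 pixels at a time, similar in spirit to the "patchify" stem described next:

```py
import torch
from torch import nn

image = torch.randn(1, 3, 224, 224)  # (batch, channels, height, width)
# each output value summarizes one non-overlapping 4x4 window of the input
conv = nn.Conv2d(in_channels=3, out_channels=96, kernel_size=4, stride=4)
features = conv(image)
print(features.shape)  # torch.Size([1, 96, 56, 56]) because 224 / 4 = 56
```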
Between convolutional layers, it is common to add a pooling layer to reduce dimensionality and make the model more robust to variations of a feature's position. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/convnext_architecture.png"/> </div> ConvNeXT modernizes a CNN in five ways: 1. Change the number of blocks in each stage and "patchify" an image with a larger stride and corresponding kernel size. The non-overlapping sliding window makes this patchifying strategy similar to how ViT splits an image into patches. 2. A *bottleneck* layer shrinks the number of channels and then restores it because it is faster to do a 1x1 convolution, and you can increase the depth. An inverted bottleneck does the opposite by expanding the number of channels and shrinking them, which is more memory efficient. 3. Replace the typical 3x3 convolutional layer in the bottleneck layer with *depthwise convolution*, which applies a convolution to each input channel separately and then stacks them back together at the end. This widens the network width for improved performance. 4. ViT has a global receptive field which means it can see more of an image at once thanks to its attention mechanism. ConvNeXT attempts to replicate this effect by increasing the kernel size to 7x7. 5. ConvNeXT also makes several layer design changes that imitate Transformer models. There are fewer activation and normalization layers, the activation function is switched to GELU instead of ReLU, and it uses LayerNorm instead of BatchNorm. The output from the convolution blocks is passed to a classification head which converts the outputs into logits and calculates the cross-entropy loss to find the most likely label. ### Object detection [DETR](model_doc/detr), *DEtection TRansformer*, is an end-to-end object detection model that combines a CNN with a Transformer encoder-decoder. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/detr_architecture.png"/> </div> 1. A pretrained CNN *backbone* takes an image, represented by its pixel values, and creates a low-resolution feature map of it. A 1x1 convolution is applied to the feature map to reduce dimensionality and it creates a new feature map with a high-level image representation. Since the Transformer is a sequential model, the feature map is flattened into a sequence of feature vectors that are combined with positional embeddings. 2. The feature vectors are passed to the encoder, which learns the image representations using its attention layers. Next, the encoder hidden states are combined with *object queries* in the decoder. Object queries are learned embeddings that focus on the different regions of an image, and they're updated as they progress through each attention layer. The decoder hidden states are passed to a feedforward network that predicts the bounding box coordinates and class label for each object query, or `no object` if there isn't one. DETR decodes each object query in parallel to output *N* final predictions, where *N* is the number of queries. Unlike a typical autoregressive model that predicts one element at a time, object detection is a set prediction task (`bounding box`, `class label`) that makes *N* predictions in a single pass. 3. DETR uses a *bipartite matching loss* during training to compare a fixed number of predictions with a fixed set of ground truth labels. 
If there are fewer ground truth labels in the set of *N* labels, then they're padded with a `no object` class. This loss function encourages DETR to find a one-to-one assignment between the predictions and ground truth labels. If either the bounding boxes or class labels aren't correct, a loss is incurred. Likewise, if DETR predicts an object that doesn't exist, it is penalized. This encourages DETR to find other objects in an image instead of focusing on one really prominent object. An object detection head is added on top of DETR to find the class label and the coordinates of the bounding box. There are two components to the object detection head: a linear layer to transform the decoder hidden states into logits over the class labels, and an MLP to predict the bounding box. Ready to try your hand at object detection? Check out our complete [object detection guide](tasks/object_detection) to learn how to finetune DETR and use it for inference! ### Image segmentation [Mask2Former](model_doc/mask2former) is a universal architecture for solving all types of image segmentation tasks. Traditional segmentation models are typically tailored towards a particular subtask of image segmentation, like instance, semantic or panoptic segmentation. Mask2Former frames each of those tasks as a *mask classification* problem. Mask classification groups pixels into *N* segments, and predicts *N* masks and their corresponding class label for a given image. We'll explain how Mask2Former works in this section, and then you can try finetuning SegFormer at the end. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/mask2former_architecture.png"/> </div> There are three main components to Mask2Former: 1. A [Swin](model_doc/swin) backbone accepts an image and creates a low-resolution image feature map from 3 consecutive 3x3 convolutions. 2. The feature map is passed to a *pixel decoder* which gradually upsamples the low-resolution features into high-resolution per-pixel embeddings. The pixel decoder actually generates multi-scale features (containing both low- and high-resolution features) with resolutions 1/32, 1/16, and 1/8th of the original image. 3. Each of these feature maps of differing scales is fed successively to one Transformer decoder layer at a time in order to capture small objects from the high-resolution features. The key to Mask2Former is the *masked attention* mechanism in the decoder. Unlike cross-attention which can attend to the entire image, masked attention only focuses on a certain area of the image. This is faster and leads to better performance because the local features of an image are enough for the model to learn from. 4. Like [DETR](tasks_explained#object-detection), Mask2Former also uses learned object queries and combines them with the image features from the pixel decoder to make a set prediction (`class label`, `mask prediction`). The decoder hidden states are passed into a linear layer and transformed into logits over the class labels. The cross-entropy loss is calculated between the logits and class label to find the most likely one. The mask predictions are generated by combining the pixel embeddings with the final decoder hidden states. The sigmoid cross-entropy and dice losses are calculated between the logits and the ground truth mask to find the most likely mask. Ready to try your hand at image segmentation? 
Check out our complete [image segmentation guide](tasks/semantic_segmentation) to learn how to finetune SegFormer and use it for inference! ### Depth estimation [GLPN](model_doc/glpn), *Global-Local Path Network*, is a Transformer for depth estimation that combines a [SegFormer](model_doc/segformer) encoder with a lightweight decoder. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/glpn_architecture.jpg"/> </div> 1. Like ViT, an image is split into a sequence of patches, except these image patches are smaller. This is better for dense prediction tasks like segmentation or depth estimation. The image patches are transformed into patch embeddings (see the [image classification](#image-classification) section for more details about how patch embeddings are created), which are fed to the encoder. 2. The encoder accepts the patch embeddings, and passes them through several encoder blocks. Each block consists of attention and Mix-FFN layers. The purpose of the latter is to provide positional information. At the end of each encoder block is a *patch merging* layer for creating hierarchical representations. The features of each group of neighboring patches are concatenated, and a linear layer is applied to the concatenated features to reduce the number of patches to a resolution of 1/4. This becomes the input to the next encoder block, where this whole process is repeated until you have image features with resolutions of 1/8, 1/16, and 1/32. 3. A lightweight decoder takes the last feature map (1/32 scale) from the encoder and upsamples it to 1/16 scale. From here, the feature is passed into a *Selective Feature Fusion (SFF)* module, which selects and combines local and global features from an attention map for each feature and then upsamples it to 1/8th. This process is repeated until the decoded features are the same size as the original image. The output is passed through two convolution layers and then a sigmoid activation is applied to predict the depth of each pixel. ## Natural language processing The Transformer was initially designed for machine translation, and since then, it has practically become the default architecture for solving all NLP tasks. Some tasks lend themselves to the Transformer's encoder structure, while others are better suited for the decoder. Still, other tasks make use of both the Transformer's encoder-decoder structure. ### Text classification [BERT](model_doc/bert) is an encoder-only model and is the first model to effectively implement deep bidirectionality to learn richer representations of the text by attending to words on both sides. 1. BERT uses [WordPiece](tokenizer_summary#wordpiece) tokenization to generate a token embedding of the text. To tell the difference between a single sentence and a pair of sentences, a special `[SEP]` token is added to differentiate them. A special `[CLS]` token is added to the beginning of every sequence of text. The final output with the `[CLS]` token is used as the input to the classification head for classification tasks. BERT also adds a segment embedding to denote whether a token belongs to the first or second sentence in a pair of sentences. 2. BERT is pretrained with two objectives: masked language modeling and next-sentence prediction. In masked language modeling, some percentage of the input tokens are randomly masked, and the model needs to predict these. 
This solves the issue of bidirectionality, where otherwise the model could cheat and see all the words and "predict" the next word. The final hidden states of the predicted mask tokens are passed to a feedforward network with a softmax over the vocabulary to predict the masked word. The second pretraining objective is next-sentence prediction. The model must predict whether sentence B follows sentence A. Half of the time sentence B is the next sentence, and the other half of the time, sentence B is a random sentence. The prediction, whether it is the next sentence or not, is passed to a feedforward network with a softmax over the two classes (`IsNext` and `NotNext`). 3. The input embeddings are passed through multiple encoder layers to output some final hidden states. To use the pretrained model for text classification, add a sequence classification head on top of the base BERT model. The sequence classification head is a linear layer that accepts the final hidden states and performs a linear transformation to convert them into logits. The cross-entropy loss is calculated between the logits and the target to find the most likely label. Ready to try your hand at text classification? Check out our complete [text classification guide](tasks/sequence_classification) to learn how to finetune DistilBERT and use it for inference! ### Token classification To use BERT for token classification tasks like named entity recognition (NER), add a token classification head on top of the base BERT model. The token classification head is a linear layer that accepts the final hidden states and performs a linear transformation to convert them into logits. The cross-entropy loss is calculated between the logits and each token to find the most likely label. Ready to try your hand at token classification? Check out our complete [token classification guide](tasks/token_classification) to learn how to finetune DistilBERT and use it for inference! ### Question answering To use BERT for question answering, add a span classification head on top of the base BERT model. This linear layer accepts the final hidden states and performs a linear transformation to compute the `span` start and end logits corresponding to the answer. The cross-entropy loss is calculated between the logits and the label position to find the most likely span of text corresponding to the answer. Ready to try your hand at question answering? Check out our complete [question answering guide](tasks/question_answering) to learn how to finetune DistilBERT and use it for inference! <Tip> 💡 Notice how easy it is to use BERT for different tasks once it's been pretrained. You only need to add a specific head to the pretrained model to manipulate the hidden states into your desired output! </Tip> ### Text generation [GPT-2](model_doc/gpt2) is a decoder-only model pretrained on a large amount of text. It can generate convincing (though not always true!) text given a prompt and complete other NLP tasks like question answering despite not being explicitly trained to. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/gpt2_architecture.png"/> </div> 1. GPT-2 uses [byte pair encoding (BPE)](tokenizer_summary#bytepair-encoding-bpe) to tokenize words and generate a token embedding. Positional encodings are added to the token embeddings to indicate the position of each token in the sequence. The input embeddings are passed through multiple decoder blocks to output some final hidden state. 
Within each decoder block, GPT-2 uses a *masked self-attention* layer which means GPT-2 can't attend to future tokens. It is only allowed to attend to tokens on the left. This is different from BERT's [`mask`] token because, in masked self-attention, an attention mask is used to set the score to `0` for future tokens. 2. The output from the decoder is passed to a language modeling head, which performs a linear transformation to convert the hidden states into logits. The labels are the next tokens in the sequence, which are created by shifting the input tokens by one position so that each position is paired with the token that follows it. The cross-entropy loss is calculated between the logits and these shifted labels to output the next most likely token. GPT-2's pretraining objective is based entirely on [causal language modeling](glossary#causal-language-modeling), predicting the next word in a sequence. This makes GPT-2 especially good at tasks that involve generating text. Ready to try your hand at text generation? Check out our complete [causal language modeling guide](tasks/language_modeling#causal-language-modeling) to learn how to finetune DistilGPT-2 and use it for inference! <Tip> For more information about text generation, check out the [text generation strategies](generation_strategies) guide! </Tip> ### Summarization Encoder-decoder models like [BART](model_doc/bart) and [T5](model_doc/t5) are designed for the sequence-to-sequence pattern of a summarization task. We'll explain how BART works in this section, and then you can try finetuning T5 at the end. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bart_architecture.png"/> </div> 1. BART's encoder architecture is very similar to BERT's and accepts a token and positional embedding of the text. BART is pretrained by corrupting the input and then reconstructing it with the decoder. Unlike other encoders with specific corruption strategies, BART can apply any type of corruption. The *text infilling* corruption strategy works the best though. In text infilling, a number of text spans are replaced with a **single** [`mask`] token. This is important because the model has to predict the masked tokens, and it teaches the model to predict the number of missing tokens. The input embeddings and masked spans are passed through the encoder to output some final hidden states, but unlike BERT, BART doesn't add a final feedforward network at the end to predict a word. 2. The encoder's output is passed to the decoder, which must predict the masked tokens and any uncorrupted tokens from the encoder's output. This gives additional context to help the decoder restore the original text. The output from the decoder is passed to a language modeling head, which performs a linear transformation to convert the hidden states into logits. The cross-entropy loss is calculated between the logits and the labels, which are just the target tokens shifted by one position. Ready to try your hand at summarization? Check out our complete [summarization guide](tasks/summarization) to learn how to finetune T5 and use it for inference! <Tip> For more information about text generation, check out the [text generation strategies](generation_strategies) guide! </Tip> ### Translation Translation is another example of a sequence-to-sequence task, which means you can use an encoder-decoder model like [BART](model_doc/bart) or [T5](model_doc/t5) to do it. We'll explain how BART works in this section, and then you can try finetuning T5 at the end. 
BART adapts to translation by adding a separate randomly initialized encoder to map a source language to an input that can be decoded into the target language. This new encoder's embeddings are passed to the pretrained encoder instead of the original word embeddings. The source encoder is trained by updating the source encoder, positional embeddings, and input embeddings with the cross-entropy loss from the model output. The rest of BART's parameters are frozen in this first step, and all of the model parameters are trained together in the second step. BART has since been followed up by a multilingual version, mBART, intended for translation and pretrained on many different languages. Ready to try your hand at translation? Check out our complete [translation guide](tasks/translation) to learn how to finetune T5 and use it for inference! <Tip> For more information about text generation, check out the [text generation strategies](generation_strategies) guide! </Tip>
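If you just want to see these task heads in action without any finetuning, the [`pipeline`] API bundles a pretrained checkpoint, its tokenizer, and the appropriate pre- and post-processing. As a rough sketch (the checkpoint name is only an example, any compatible model from the Hub works):

```python
from transformers import pipeline

# Summarization with an encoder-decoder checkpoint (example model name)
summarizer = pipeline("summarization", model="t5-small")
print(summarizer("Transformers provides pretrained models for text, vision and audio tasks.")[0]["summary_text"])

# Translation with the same family of checkpoints (example task/model pairing)
translator = pipeline("translation_en_to_fr", model="t5-small")
print(translator("The Transformer was initially designed for machine translation.")[0]["translation_text"])
```

Under the hood, each pipeline simply combines the tokenizer, the pretrained model with its task head, and the processing steps described in the sections above.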
transformers/docs/source/en/tasks_explained.md/0
{ "file_path": "transformers/docs/source/en/tasks_explained.md", "repo_id": "transformers", "token_count": 6961 }
264
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # BERTología Hay un creciente campo de estudio empeñado en la investigación del funcionamiento interno de los transformers de gran escala como BERT (que algunos llaman "BERTología"). Algunos buenos ejemplos de este campo son: - BERT Rediscovers the Classical NLP Pipeline por Ian Tenney, Dipanjan Das, Ellie Pavlick: https://arxiv.org/abs/1905.05950 - Are Sixteen Heads Really Better than One? por Paul Michel, Omer Levy, Graham Neubig: https://arxiv.org/abs/1905.10650 - What Does BERT Look At? An Analysis of BERT's Attention por Kevin Clark, Urvashi Khandelwal, Omer Levy, Christopher D. Manning: https://arxiv.org/abs/1906.04341 - CAT-probing: A Metric-based Approach to Interpret How Pre-trained Models for Programming Language Attend Code Structure: https://arxiv.org/abs/2210.04633 Para asistir al desarrollo de este nuevo campo, hemos incluido algunas features adicionales en los modelos BERT/GPT/GPT-2 para ayudar a acceder a las representaciones internas, principalmente adaptado de la gran obra de Paul Michel (https://arxiv.org/abs/1905.10650): - accediendo a todos los hidden-states de BERT/GPT/GPT-2, - accediendo a todos los pesos de atención para cada head de BERT/GPT/GPT-2, - adquiriendo los valores de salida y gradientes de las heads para poder computar la métrica de importancia de las heads y realizar la poda de heads como se explica en https://arxiv.org/abs/1905.10650. Para ayudarte a entender y usar estas features, hemos añadido un script específico de ejemplo: [bertology.py](https://github.com/huggingface/transformers/tree/main/examples/research_projects/bertology/run_bertology.py) mientras extraes información y cortas un modelo pre-entrenado en GLUE.
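A modo de esbozo mínimo (el checkpoint `bert-base-uncased` y los valores del `head_mask` son solo ejemplos ilustrativos), así se puede acceder a los hidden-states y a los pesos de atención, y enmascarar heads, con las features descritas arriba:

```python
import torch
from transformers import BertModel, BertTokenizer

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertModel.from_pretrained("bert-base-uncased")

inputs = tokenizer("Hello world", return_tensors="pt")

# hidden-states y pesos de atención de todas las capas
outputs = model(**inputs, output_hidden_states=True, output_attentions=True)
print(len(outputs.hidden_states))   # embeddings + una entrada por capa
print(outputs.attentions[0].shape)  # (batch, num_heads, seq_len, seq_len)

# enmascarar algunas heads (1 = mantener, 0 = enmascarar)
head_mask = torch.ones(model.config.num_hidden_layers, model.config.num_attention_heads)
head_mask[0, :6] = 0  # anula las primeras 6 heads de la primera capa
outputs = model(**inputs, head_mask=head_mask)
```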
transformers/docs/source/es/bertology.md/0
{ "file_path": "transformers/docs/source/es/bertology.md", "repo_id": "transformers", "token_count": 761 }
265
<!--Copyright 2020 de The HuggingFace Team. Todos los derechos reservados Con licencia bajo la Licencia Apache, Versión 2.0 (la "Licencia"); No puedes usar este archivo excepto de conformidad con la Licencia. Puedes obtener una copia de la Licencia en http://www.apache.org/licenses/LICENSE-2.0 Al menos que sea requrido por la ley aplicable o acordado por escrito, el software distribuido bajo la Licencia es distribuido sobre una BASE "AS IS", SIN GARANTIAS O CONDICIONES DE NINGÚN TIPO. Ver la Licencia para el idioma específico que rige los permisos y limitaciones bajo la Licencia. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Filosofía 🤗 Transformers es una biblioteca construida para: - Los investigadores y educadores de NLP que busquen usar/estudiar/extender modelos transformers a gran escala - Profesionales que quieren optimizar esos modelos y/o ponerlos en producción - Ingenieros que solo quieren descargar un modelo preentrenado y usarlo para resolver una tarea NLP dada. La biblioteca fue diseñada con dos fuertes objetivos en mente: - Que sea tan fácil y rápida de utilizar como sea posible: - Hemos limitado enormemente el número de abstracciones que el usuario tiene que aprender. De hecho, no hay casi abstracciones, solo tres clases estándar necesarias para usar cada modelo: [configuration](main_classes/configuration), [models](main_classes/model) y [tokenizer](main_classes/tokenizer). - Todas estas clases pueden ser inicializadas de forma simple y unificada a partir de ejemplos pre-entrenados mediante el uso de un método `from_pretrained()` común de solicitud que se encargará de descargar (si es necesario), almacenar y cargar la solicitud de clase relacionada y datos asociados (configurations' hyper-parameters, tokenizers' vocabulary, and models' weights) a partir de un control pre-entrenado proporcionado en [Hugging Face Hub](https://huggingface.co/models) o de tu propio control guardado. - Por encima de esas tres clases estándar, la biblioteca proporciona dos APIs: [`pipeline`] para usar rápidamente un modelo (junto a su configuracion y tokenizer asociados) sobre una tarea dada, y [`Trainer`]/`Keras.fit` para entrenar u optimizar de forma rápida un modelo dado. - Como consecuencia, esta biblioteca NO es una caja de herramientas modular de bloques individuales para redes neuronales. Si quieres extender/construir sobre la biblioteca, usa simplemente los módulos regulares de Python/PyTorch/TensorFlow/Keras y emplea las clases estándar de la biblioteca como punto de partida para reutilizar funcionalidades tales como abrir/guardar modelo. - Proporciona modelos modernos con rendimientos lo más parecido posible a los modelos originales: - Proporcionamos al menos un ejemplo para cada arquitectura que reproduce un resultado proporcionado por los autores de dicha arquitectura. - El código normalmente es parecido al código base original, lo cual significa que algún código Pytorch puede no ser tan *pytorchic* como podría ser por haber sido convertido a código TensorFlow, y viceversa. Unos cuantos objetivos adicionales: - Exponer las características internas de los modelos de la forma más coherente posible: - Damos acceso, mediante una sola API, a todos los estados ocultos y pesos de atención. - Tokenizer y el modelo de API base están estandarizados para cambiar fácilmente entre modelos. 
- Incorporar una selección subjetiva de herramientas de gran potencial para la optimización/investigación de estos modelos: - Una forma sencilla/coherente de añadir nuevos tokens al vocabulario e incrustraciones (embeddings, en inglés) para optimización. - Formas sencillas de camuflar y reducir "transformer heads". - Cambiar fácilmente entre PyTorch y TensorFlow 2.0, permitiendo el entrenamiento usando un marco y la inferencia usando otro. ## Conceptos principales La biblioteca está construida alrededor de tres tipos de clases para cada modelo: - **Model classes** como [`BertModel`], que consisten en más de 30 modelos PyTorch ([torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module)) o modelos Keras ([tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model)) que funcionan con pesos pre-entrenados proporcionados en la biblioteca. - **Configuration classes** como [`BertConfig`], que almacena todos los parámetros necesarios para construir un modelo. No siempre tienes que generarla tu. En particular, si estas usando un modelo pre-entrenado sin ninguna modificación, la creación del modelo se encargará automáticamente de generar la configuración (que es parte del modelo). - **Tokenizer classes** como [`BertTokenizer`], que almacena el vocabulario para cada modelo y proporciona métodos para codificar/decodificar strings en una lista de índices de "token embeddings" para ser empleados en un modelo. Todas estas clases pueden ser generadas a partir de ejemplos pre-entrenados, y guardados localmente usando dos métodos: - `from_pretrained()` permite generar un modelo/configuración/tokenizer a partir de una versión pre-entrenada proporcionada ya sea por la propia biblioteca (los modelos compatibles se pueden encontrar en [Model Hub](https://huggingface.co/models)) o guardados localmente (o en un servidor) por el usuario. - `save_pretrained()` permite guardar un modelo/configuración/tokenizer localmente, de forma que puede ser empleado de nuevo usando `from_pretrained()`.
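Como esbozo mínimo de este ciclo (el checkpoint `bert-base-uncased` y la ruta local son solo ejemplos ilustrativos):

```python
from transformers import BertConfig, BertModel, BertTokenizer

# Generar las tres clases a partir de un checkpoint pre-entrenado (nombre ilustrativo)
config = BertConfig.from_pretrained("bert-base-uncased")
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertModel.from_pretrained("bert-base-uncased")

# Guardarlas localmente...
model.save_pretrained("./mi-modelo")
tokenizer.save_pretrained("./mi-modelo")

# ...y volver a cargarlas desde el directorio local
model = BertModel.from_pretrained("./mi-modelo")
tokenizer = BertTokenizer.from_pretrained("./mi-modelo")
```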
transformers/docs/source/es/philosophy.md/0
{ "file_path": "transformers/docs/source/es/philosophy.md", "repo_id": "transformers", "token_count": 1964 }
266
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Come aggiungere un modello a 🤗 Transformers? Aggiungere un nuovo modello é spesso difficile e richiede una profonda conoscenza della libreria 🤗 Transformers e anche della repository originale del modello. A Hugging Face cerchiamo di dare alla community sempre piú poteri per aggiungere modelli independentemente. Quindi, per alcuni nuovi modelli che la community vuole aggiungere a 🤗 Transformers, abbiamo creato una specifica *call-for-model-addition* che spiega passo dopo passo come aggiungere il modello richiesto. Con questo *call-for-model-addition* vogliamo insegnare a volenterosi e esperti collaboratori della community come implementare un modello in 🤗 Transformers. Se questo é qualcosa che può interessarvi, siete liberi di controllare l'attuale “calls-for-model-addition” [qui](https://github.com/huggingface/transformers/tree/main/templates/adding_a_new_model/open_model_proposals/README.md) e contattarci. Se il modello sarà selezionato, allora potrete lavorare insieme a un membro di Hugging Face per integrare il modello in 🤗 Transformers. Così facendo, ci guadagnerai in una comprensione totale, sia teorica che pratica, del modello proposto. Inoltre, sarai l'artefice di un importante contributo open-source a 🤗 Transformers. Durante l'implementazione avrai l'opportunità di: - ottenere più comprensione delle best practices in open-source - capire i principi di design di una della librerie NLP più popolari - capire come efficientemente testare complessi modelli NLP - capire come integrare utilit Python come `black`, `ruff`, `make fix-copies` in una libreria per garantire sempre di avere un codice leggibile e pulito Siamo anche contenti se vuoi aggiungere un modello che non può essere trovato nella cartella “calls-for-model-addition”. Le seguenti sezioni spiegano in dettaglio come aggiungere un nuovo modello. Può anche essere molto utile controllare modelli già aggiunti [qui](https://github.com/huggingface/transformers/pulls?q=is%3Apr+label%3A%22PR+for+Model+Addition%22+is%3Aclosed), per capire se richiamano il modello che vorreste aggiungere. Per cominciare, vediamo una panoramica general della libreria Transformers. ## Panoramica generale su 🤗 Transformers Prima di tutto, vediamo in generale 🤗 Transformers. 🤗 Transformers é una libreria molto strutturata, quindi puà essere che a volte ci sia un disaccordo con alcune filosofie della libreria o scelte di design. Dalla nostra esperienza, tuttavia, abbiamo trovato che le scelte fondamentali di design della libreria sono cruciali per usare 🤗 Transformers efficacemente su larga scala, mantenendo i costi a un livello accettabile. 
Un buon primo punto di partenza per capire al meglio la libreria é leggere la [documentazione sulla nostra filosofia](filosofia) Da qui, ci sono alcune scelte sul modo di lavorare che cerchiamo di applicare a tutti i modelli: - La composizione é generalmente favorita sulla sovra-astrazione - Duplicare il codice non é sempre male, soprattutto se migliora notevolmente la leggibilità e accessibilità del modello - Tutti i files creati per il nuovo modello devono il piu possibile "compatti". Questo vuol dire che quando qualcuno leggerá il codice di uno specifico modello, potrá vedere solo il corrispettivo file `modeling_....py` senza avere multiple dipendenze. La cosa piú importante, é che consideriamo la libreria non solo un mezzo per dare un prodotto, *per esempio* dare la possibilità di usare BERT per inferenza, ma é anche il prodotto reale che noi vogliamo migliorare sempre più. Quindi, quando aggiungi un modello, non sei solo la persona che userà il modello, ma rappresenti anche tutti coloro che leggeranno, cercheranno di capire e modificare il tuo modello. Tenendo questi principi in mente, immergiamoci nel design generale della libreria. ### Panoramica sui modelli Per aggiungere con successo un modello, é importante capire l'interazione tra il tuo modello e la sua configurazione, [`PreTrainedModel`], e [`PretrainedConfig`]. Per dare un esempio, chiameremo il modello da aggiungere a 🤗 Transformers `BrandNewBert`. Diamo un'occhiata: <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers_overview.png"/> Come potete vedere, ci basiamo sull'ereditarietà in 🤗 Transformers, tenendo però il livello di astrazione a un minimo assoluto. Non ci sono mai più di due livelli di astrazione per ogni modello nella libreria. `BrandNewBertModel` eredita da `BrandNewBertPreTrainedModel` che, a sua volta, eredita da [`PreTrainedModel`] - semplice no? Come regola generale, vogliamo essere sicuri che un nuovo modello dipenda solo da [`PreTrainedModel`]. Le funzionalità importanti che sono automaticamente conferite a ogni nuovo modello sono [`~PreTrainedModel.from_pretrained`] e [`~PreTrainedModel.save_pretrained`], che sono usate per serializzazione e deserializzazione. Tutte le altre importanti funzionalità, come ad esempio `BrandNewBertModel.forward` devono essere definite completamente nel nuovo script `modeling_brand_new_bert.py`. Inoltre, vogliamo essere sicuri che un modello con uno specifico head layer, come `BrandNewBertForMaskedLM` non erediti da `BrandNewBertModel`, ma piuttosto usi `BrandNewBertModel` come componente che può essere chiamata nel passaggio forward per mantenere il livello di astrazione basso. Ogni nuovo modello richieste una classe di configurazione, chiamata `BrandNewBertConfig`. Questa configurazione é sempre mantenuta come un attributo in [`PreTrainedModel`], e quindi può essere accessibile tramite l'attributo `config` per tutte le classi che ereditano da `BrandNewBertPreTrainedModel`: ```python model = BrandNewBertModel.from_pretrained("brandy/brand_new_bert") model.config # il modello ha accesso al suo config ``` Analogamente al modello, la configurazione eredita le funzionalità base di serializzazione e deserializzazione da [`PretrainedConfig`]. É da notare che la configurazione e il modello sono sempre serializzati in due formati differenti - il modello é serializzato in un file *pytorch_model.bin* mentre la configurazione con *config.json*. 
Chiamando [`~PreTrainedModel.save_pretrained`] automaticamente chiamerà [`~PretrainedConfig.save_pretrained`], cosicché sia il modello che la configurazione siano salvati. ### Stile per il codice Quando codifichi un nuovo modello, tieni presente che Transformers ha una sua struttura di fondo come libreria, perciò ci sono alcuni fatti da considerare su come scrivere un codice :-) 1. Il forward pass del tuo modello dev'essere scritto completamente nel file del modello, mentre dev'essere indipendente da altri modelli nella libreria. Se vuoi riutilizzare un blocco di codice da un altro modello, copia e incolla il codice con un commento `# Copied from` in cima al codice (guarda [qui](https://github.com/huggingface/transformers/blob/v4.17.0/src/transformers/models/roberta/modeling_roberta.py#L160) per un ottimo esempio). 2. Il codice dev'essere interamente comprensibile, anche da persone che non parlano in inglese. Questo significa che le variabili devono avere un nome descrittivo e bisogna evitare abbreviazioni. Per esempio, `activation` é molto meglio che `act`. Le variabili con una lettera sono da evitare fortemente, almeno che non sia per un indce in un for loop. 3. Generamente é meglio avere un codice esplicito e piú lungo che un codice corto e magico. 4. Evita di subclassare `nn.Sequential` in Pytorch, puoi subclassare `nn.Module` e scrivere il forward pass, cosicché chiunque può effettuare debug sul tuo codice, aggiungendo print o breaking points. 5. La tua function-signature dev'essere type-annoted. Per il resto, é meglio preferire variabili con un nome accettabile piuttosto che annotazioni per aumentare la comprensione e leggibilità del codice. ### Panoramica sui tokenizers Questa sezione sarà creata al piu presto :-( ## Aggiungere un modello a 🤗 Transformers passo dopo passo Ci sono differenti modi per aggiungere un modello a Hugging Face. Qui trovi una lista di blog posts da parte della community su come aggiungere un modello: 1. [Aggiungere GPT2](https://medium.com/huggingface/from-tensorflow-to-pytorch-265f40ef2a28) scritto da [Thomas](https://huggingface.co/thomwolf) 2. [Aggiungere WMT19 MT](https://huggingface.co/blog/porting-fsmt) scritto da [Stas](https://huggingface.co/stas) Per esperienza, possiamo dirti che quando si aggiunge un modello é meglio tenere a mente le seguenti considerazioni: - Non sfondare una porta giá aperta! La maggior parte del codice che aggiungerai per un nuovo modello 🤗 Transformers esiste già da qualche parte in 🤗 Transformers. Prendi un po' di tempo per trovare codici simili in modelli e tokenizers esistenti e fare un copia-incolla. Ricorda che [grep](https://www.gnu.org/software/grep/) e [rg](https://github.com/BurntSushi/ripgrep) sono tuoi buoni amici. Inoltre, ricorda che puó essere molto probabile che il tokenizer per il tuo modello sia basato sull'implementazione di un altro modello, e il codice del tuo modello stesso su un altro ancora. *Per esempio* il modello FSMT é basato su BART, mentre il tokenizer di FSMT é basato su XLM. - Ricorda che qui é piu una sfida ingegneristica che scientifica. Spendi piú tempo per create un efficiente ambiente di debugging piuttosto che cercare di capire tutti gli aspetti teorici dell'articolo del modello. - Chiedi aiuto se sei in panne! I modelli sono la parte principale di 🤗 Transformers, perciò qui a Hugging Face siamo più che contenti di aiutarti in ogni passo per aggiungere il tuo modello. Non esitare a chiedere se vedi che non riesci a progredire. 
Di seguito, diamo una ricetta generale per aiutare a portare un modello in 🤗 Transformers. La lista seguente é un sommario di tutto quello che é stato fatto per aggiungere un modello, e può essere usata come To-Do List: - 1. ☐ (Opzionale) Capire gli aspetti teorici del modello - 2. ☐ Preparare l'ambiente dev per transformers - 3. ☐ Preparare l'ambiente debugging della repository originale - 4. ☐ Create uno script che gestisca con successo il forward pass usando la repository originale e checkpoint - 5. ☐ Aggiungere con successo lo scheletro del modello a Transformers - 6. ☐ Convertire i checkpoint original a Transformers checkpoint - 7. ☐ Effettuare con successo la forward pass in Transformers, di modo che dia un output identico al checkpoint originale - 8. ☐ Finire i tests per il modello in Transformers - 9. ☐ Aggiungere con successo Tokenizer in Transformers - 10. ☐ Testare e provare gli integration tests da capo a fine - 11. ☐ Completare i docs - 12. ☐ Caricare i moedl weights all'hub - 13. ☐ Sottomettere una pull request - 14. ☐ (Opzionale) Aggiungere un notebook con una demo Per cominciare di solito consigliamo `BrandNewBert`, partendo dalla teoria, di modo da avere una buona comprensione della teoria generale. TUttavia, se preferisci imparare l'aspetto teorico del modello mentre *lavori* sul modello é ok immergersi direttamente nel codice di `BrandNewBert`. Questa opzione puó essere buona se le tue skills ingegneristiche sono meglio che quelle teoriche, o se il paper `BrandNewBert` ti dá problemi, o se semplicemente ti piace programmare piú che leggere articoli scientifici. ### 1. (Opzionale) Aspetti teorici di BrandNewBert Allora con calma, prendi un po' di tempo per leggere l'articolo su *BrandNewBert* . Sicuramente, alcune sezioni dell'articolo sono molto complesse, ma non preoccuparti! L'obiettivo non é avere una compresione immensa della teoria alla base, ma estrarre le informazioni necessarie per re-implementare con successo il modello in 🤗 Transformers. Quindi, non impazzire sugli aspetti teorici, ma piuttosto focalizzati su quelli pratici, ossia: - Che tipo di modello é *brand_new_bert*? É solo un encoder in stile BERT? O tipo decoder come GPT2? O encoder e decoder stile BART? Dai un'occhiata a [model_summary](model_summary) se non sei famigliare con le differenze tra questi modelli - Quali sono le applicazioni di *brand_new_bert*? Classificazione di testo? Generazione di testo? O per tasks del genere seq2seq? - Quali sono le nuove aggiunte al modello che lo rendono diverso da BERT/GPT-2/BART? - Quali modelli estistenti in [🤗 Transformers models](https://huggingface.co/transformers/#contents) sono molto simili a *brand_new_bert*? - Che tipo di tokenizer si usa in questo caso? Un sentencepiece tokenizer? O un word piece tokenizer? Il tokenizer é lo stesso di BERT o BART? Una volta che senti che hai avuto una bella overview dell'architettura del modello, puoi scrivere senza problemi al team di Hugging Face per ogni domanda che tu hai. Questo puó includere domande sull'architettura del modello, o sull'attention layer, etc. Saremo molto felici di aiutarti :) ### 2. Prepare il tuo ambiente 1. Forka la [repository](https://github.com/huggingface/transformers) cliccando sul tasto ‘Fork' nella pagina della repository. Questo crea una copia del codice nel tuo account GitHub 2. 
Clona il tuo fork `transfomers` sul tuo dico locale, e aggiungi la repository base come remota: ```bash git clone https://github.com/[your Github handle]/transformers.git cd transformers git remote add upstream https://github.com/huggingface/transformers.git ``` 3. Crea un ambiente di sviluppo, per esempio tramite questo comando: ```bash python -m venv .env source .env/bin/activate pip install -e ".[dev]" ``` quindi torna alla directory principale: ```bash cd .. ``` 4. Attenzione, raccomandiamo di aggiungere la versione di PyTorch di *brand_new_bert* a Transfomers. Per installare PyTorch, basta seguire queste istruzioni https://pytorch.org/get-started/locally/. **Nota bene:** Non c'é bisogno di installare o avere installato CUDA. Il nuovo modello può funzionare senza problemi su una CPU. 5. Per trasferire *brand_new_bert* To port *brand_new_bert* avrai bisogno anche accesso alla sua repository originale: ```bash git clone https://github.com/org_that_created_brand_new_bert_org/brand_new_bert.git cd brand_new_bert pip install -e . ``` Ok, ora hai un ambiente di sviluppo per portare *brand_new_bert* in 🤗 Transformers. ### 3.-4. Provare un pretrained checkpoint usando la repo originale Per cominciare, comincerai a lavorare sulla repo originale di *brand_new_bert*. Come spesso accade, l'implementazione originale é molto sullo stile "ricerca". Questo significa che a volte la documentazione non é al top, magari manca qualche cosa e il codice puó essere difficile da capire. Tuttavia, questa é e dev'essere la motivazione per reimplementare *brand_new_bert*. In Hugging Face, uno degli obiettivi principali é di *mettere le persone sulle spalle dei giganti*, il che si traduce, in questo contesto, di prendere un modello funzionante e riscriverlo e renderlo il piú possibile **accessibile, user-friendly, e leggibile**. Questa é la top motivazione per re-implementare modelli in 🤗 Transformers - cercare di creare nuove complesse tecnologie NLP accessibili a **chiunque**. Riuscire a far girare il modello pretrained originale dalla repository ufficiale é spesso il passo **piu arduo**. Dalla nostra esperienza, é molto importante spendere un p' di tempo per diventare familiari con il codice base originale. Come test, prova a capire i seguenti punti: - Dove si trovano i pretrained weights? - Come caricare i pretrained weights nel modello corrispondente? - Come girare un tokenizer independentemente dal modello? - Prova a tracciare un singolo forward pass, cosicché potrai sapere che classi e funzioni sono richieste per un semplice forward pass. Di solito, dovrai reimplementare queste funzioni e basta - Prova a localizzare i componenti importanti del modello: Dove si trova la classe del modello? Ci sono sotto classi nel modello *per esempio* EngoderModel, DecoderMOdel? Dove si trova il self-attention layer? Ci sono molteplici differenti layer di attention, *per esempio * *self-attention*, *cross-attention*...? - Come puoi fare debug sul modello nell'ambiente originale della repo? Devi aggiungere dei *print* o puoi usare *ipdb* come debugger interattivo, o vabene anche un IDE efficiente per debug come PyCharm? É molto importante che prima di cominciare a trasferire il modello nuovo tu spenda tempo a fare debug del codice originale in maniera **efficiente**! Inoltre, ricorda che tutta la library é open-soruce, quindi non temere di aprire issue o fare una pull request nella repo originale. Tutti coloro che mantengono la repository saranno piú che felici di avere qualcuno che guarda e gioca con i loro codici! 
A questo punto, sta a te decidere quale ambiente per debug vuoi usare. Noi consilgiamo di evitare setup con GPU, che potrebbero costare assai, lavorare su una CPU puó essere un ottimo punto di partenza per indagare la repository originale e per cominciare a scrivere il codice per 🤗 Transformers. Solo alla fine, quando il modello é stato portato con successo in 🤗 Transformers, allora si potrá verificare il suo funzionamento su GPU. In generale ci sono due possibili ambienti di debug per il testare il modello originale: - [Jupyter notebooks](https://jupyter.org/) / [google colab](https://colab.research.google.com/notebooks/intro.ipynb) - Scripts locali in Python Il vantaggio dei Jupyter notebooks é la possibilità di eseguire cella per cella, il che può essere utile per decomporre tutte le componenti logiche, cosi da a vere un ciclo di debug più rapido, siccome si possono salvare i risultati da steps intermedi. Inoltre, i notebooks spesso sono molto facili da condividere con altri contributors, il che può essere molto utile se vuoi chiedere aiuto al team di Hugging Face. Se sei famigliare con Jupyter notebooks allora racommandiamo di lavorare in questa maniera. Ovviamente se non siete abituati a lavorare con i notebook, questo può essere uno svantaggio nell'usare questa tecnologia, sprecando un sacco di tempo per setup e portare tutto al nuovo ambiente, siccome non potreste neanche usare dei tools di debug come `ipdb`. Per ogni pratica code-base, é sempre meglio come primo step caricare un **piccolo** checkpoint pretrained e cercare di riprodurre un singolo forward pass usando un vettore fittizio di IDs fatti da numeri interi. Un esempio per uno script simile, in pseudocodice é: ```python model = BrandNewBertModel.load_pretrained_checkpoint("/path/to/checkpoint/") input_ids = [0, 4, 5, 2, 3, 7, 9] # vector of input ids original_output = model.predict(input_ids) ``` Per quanto riguarda la strategia di debugging, si può scegliere tra: - Decomporre il modello originario in piccole componenenti e testare ognuna di esse - Decomporre il modello originario nel *tokenizer* originale e nel *modello* originale, testare un forward pass su questi, e usare dei print statement o breakpoints intermedi per verificare Ancora una volta, siete liberi di scegliere quale strategia sia ottimale per voi. Spesso una strategia é piu avvantaggiosa di un'altra, ma tutto dipende dall'code-base originario. Se il code-base vi permette di decomporre il modello in piccole sub-componenenti, *per esempio* se il code-base originario può essere facilmente testato in eager mode, allora vale la pena effettuare un debugging di questo genere. 
Ricordate che ci sono dei vantaggi nel decidere di prendere la strada piu impegnativa sin da subito: - negli stage piu finali, quando bisognerà comparare il modello originario all'implementazione in Hugging Face, potrete verificare automaticamente ogni componente, individualmente, di modo che ci sia una corrispondenza 1:1 - avrete l'opportunità di decomporre un problema molto grande in piccoli passi, così da strutturare meglio il vostro lavoro - separare il modello in componenti logiche vi aiuterà ad avere un'ottima overview sul design del modello, quindi una migliore comprensione del modello stesso - verso gli stage finali i test fatti componente per componente vi aiuterà ad essere sicuri di non andare avanti e indietro nell'implementazione, così da continuare la modifica del codice senza interruzione Un ottimo esempio di come questo può essere fatto é dato da [Lysandre](https://gist.github.com/LysandreJik/db4c948f6b4483960de5cbac598ad4ed) per il modello ELECTRA Tuttavia, se il code-base originale é molto complesso o le componenti intermedie possono essere testate solo in tramite compilazione, potrebbe richiedere parecchio tempo o addirittura essere impossibile separare il modello in piccole sotto-componenti. Un buon esempio é [MeshTensorFlow di T5](https://github.com/tensorflow/mesh/tree/master/mesh_tensorflow). Questa libreria é molto complessa e non offre un metodo semplice di decomposizione in sotto-componenti. Per simili librerie, potrete fare affidamento ai print statements. In ogni caso, indipendentemente da quale strategia scegliete, la procedura raccomandata é di cominciare a fare debug dal primo layer al layer finale. É consigliato recuperare gli output dai layers, tramite print o sotto-componenti, nel seguente ordine: 1. Recuperare gli IDs di input dati al modello 2. Recuperare i word embeddings 3. Recuperare l'input del primo Transformer layer 4. Recuperare l'output del primo Transformer layer 5. Recuperare l'output dei seguenti `n - 1` Transformer layers 6. Recuperare l'output dell'intero BrandNewBert Model Gli IDs in input dovrebbero essere un arrary di interi, *per esempio* `input_ids = [0, 4, 4, 3, 2, 4, 1, 7, 19]` Gli output dei seguenti layer di solito dovrebbero essere degli array di float multi-dimensionali come questo: ``` [[ [-0.1465, -0.6501, 0.1993, ..., 0.1451, 0.3430, 0.6024], [-0.4417, -0.5920, 0.3450, ..., -0.3062, 0.6182, 0.7132], [-0.5009, -0.7122, 0.4548, ..., -0.3662, 0.6091, 0.7648], ..., [-0.5613, -0.6332, 0.4324, ..., -0.3792, 0.7372, 0.9288], [-0.5416, -0.6345, 0.4180, ..., -0.3564, 0.6992, 0.9191], [-0.5334, -0.6403, 0.4271, ..., -0.3339, 0.6533, 0.8694]]], ``` Ci aspettiamo che ogni modello aggiunto a 🤗 Transformers passi con successo un paio di test d'integrazione. Questo significa che il modello originale e la sua implementazione in 🤗 Transformers abbiano lo stesso output con una precisione di 0.001! Siccome é normale che lo stesso esatto modello, scritto in librerie diverse, possa dare output leggermente diversi, la tolleranza accettata é 1e-3 (0.001). Ricordate che i due modelli devono dare output quasi identici. Dunque, é molto conveniente comparare gli output intermedi di 🤗 Transformers molteplici volte con gli output intermedi del modello originale di *brand_new_bert*. Di seguito vi diamo alcuni consigli per avere un ambiente di debug il piu efficiente possibile: - Trovate la migliore strategia per fare debug dei risultati intermedi. Per esempio, é la repository originale scritta in PyTorch? 
Se si, molto probabilmente dovrete dedicare un po' di tempo per scrivere degli script piu lunghi, così da decomporre il modello originale in piccole sotto-componenti, in modo da poter recuperare i valori intermedi. Oppure, la repo originale é scritta in Tensorflow 1? Se é così dovrete fare affidamento ai print di Tensorflow [tf.print](https://www.tensorflow.org/api_docs/python/tf/print) per avere i valori intermedi. Altro caso, la repo é scritta in Jax? Allora assicuratevi che il modello non sia in **jit** quanto testate il foward pass, *per esempio* controllate [questo link](https://github.com/google/jax/issues/196). - Usate i più piccoli pretrained checkpoint che potete trovare. Piu piccolo é il checkpoint, piu velocemente sarà il vostro ciclo di debug. Non é efficiente avere un pretrained model così gigante che per il forward pass impieghi piu di 10 secondi. Nel caso in cui i checkpoints siano molto grandi, e non si possa trovare di meglio, allora é buona consuetudine ricorrere a fare un dummy model nel nuovo ambiente, con weights inizializzati random e salvare quei weights per comprare la versione 🤗 Transformers con il vostro modello - Accertatevi di usare la via piu semplice per chiamare il forward pass nella repo originale. Sarebbe opportuno trovare la funzione originaria che chiami **solo** un singolo forward pass, *per esempio* questa funzione spesso viene chiamata `predict`, `evaluate`, `forward` o `__call__`. Siate sicuri di non fare debug su una funzione che chiami `forward` molteplici volte, *per esempio* per generare testo, come `autoregressive_sample`, `generate`. - Cercate di separare la tokenization dal forward pass del modello. Se la repo originaria mostra esempio dove potete dare come input una stringa, provate a cercare dove nella forward call la stringa viene cambiata in input ids e cominciate il debug da questo punto. Questo vi garantisce un ottimo punto di partenza per scrivere un piccolo script personale dove dare gli input al modello, anziche delle stringhe in input. - Assicuratevi che il debugging **non** sia in training mode. Spesso questo potra il modello a dare degli output random, per via dei molteplici dropout layers. Assicuratevi che il forward pass nell'ambiente di debug sia **deterministico**, cosicche i dropout non siano usati. Alternativamente, potete usare *transformers.utils.set_seed* se la vecchia e nuova implementazione sono nello stesso framework. La seguente sezione vi da ulteriori dettagli e accorgimenti su come potete fare tutto questo per *brand_new_bert*. ### 5.-14. Trasferire BrandNewBert in 🤗 Transformers Allora cominciamo ad aggiungere un nuovo codice in 🤗 Transformers. Andate nel vostro fork clone di 🤗 Transformers: ```bash cd transformers ``` Nel caso speciale in cui stiate aggiungendo un modello, la cui architettura sia identica a una di un modello già esistente, dovrete solo aggiugnere uno script di conversione, come descritto [qui](#write-a-conversion-script). In questo caso, potete riutilizzare l'intera architettura del modello gia esistente. Se questo non é il caso, cominciamo con il generare un nuovo modello. Avrete due opzioni: - `transformers-cli add-new-model-like` per aggiungere un nuovo modello come uno che gia esiste - `transformers-cli add-new-model` per aggiungere un nuovo modello da un nostro template (questo assomigliera a BERT o Bart, in base al modello che selezionerete) In entrambi i casi, l'output vi darà un questionario da riempire con informazioni basi sul modello. 
Il secondo comando richiede di installare un `cookiecutter` - maggiori informazioni [qui](https://github.com/huggingface/transformers/tree/main/templates/adding_a_new_model). **Aprire una Pull Request in main huggingface/transformers repo** Prime di cominciare ad adattare il codice automaticamente generato, aprite una nuova PR come "Work in progress (WIP)", *per esempio* "[WIP] Aggiungere *brand_new_bert*", cosicché il team di Hugging Face possa lavorare al vostro fianco nell' integrare il modello in 🤗 Transformers. Questi sarebbero gli step generali da seguire: 1. Creare un branch dal main branch con un nome descrittivo ```bash git checkout -b add_brand_new_bert ``` 2. Commit del codice automaticamente generato ```bash git add . git commit ``` 3. Fare fetch e rebase del main esistente ```bash git fetch upstream git rebase upstream/main ``` 4. Push dei cambiamenti al proprio account: ```bash git push -u origin a-descriptive-name-for-my-changes ``` 5. Una volte che siete soddisfatti dei nuovi cambiamenti, andate sulla webpage del vostro fork su GitHub. Cliccate "Pull request". Assiuratevi di aggiungere alcuni membri di Hugging Face come reviewers, nel riguardo alla destra della pagina della PR, cosicche il team Hugging Face verrà notificato anche per i futuri cambiamenti. 6. Cambiare la PR a draft, cliccando su "Convert to draft" alla destra della pagina della PR Da quel punto in poi, ricordate di fare commit di ogni progresso e cambiamento, cosicche venga mostrato nella PR. Inoltre, ricordatevi di tenere aggiornato il vostro lavoro con il main esistente: ```bash git fetch upstream git merge upstream/main ``` In generale, tutte le domande che avrete riguardo al modello o l'implementazione dovranno essere fatte nella vostra PR e discusse/risolte nella PR stessa. In questa maniera, il team di Hugging Face sarà sempre notificato quando farete commit di un nuovo codice o se avrete qualche domanda. É molto utile indicare al team di Hugging Face il codice a cui fate riferimento nella domanda, cosicche il team potra facilmente capire il problema o la domanda. Per fare questo andate sulla tab "Files changed", dove potrete vedere tutti i vostri cambiamenti al codice, andate sulla linea dove volete chiedere una domanda, e cliccate sul simbolo "+" per aggiungere un commento. Ogni volta che una domanda o problema é stato risolto, cliccate sul bottone "Resolve". In questa stessa maniera, Hugging Face aprirà domande o commenti nel rivedere il vostro codice. Mi raccomando, chiedete più domande possibili nella pagina della vostra PR. Se avete domande molto generali, non molto utili per il pubblico, siete liberi di chiedere al team Hugging Face direttamente su slack o email. **5. Adattare i codici per brand_new_bert** Per prima cosa, ci focalizzeremo sul modello e non sui tokenizer. Tutto il codice relative dovrebbe trovarsi in `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` e `src/transformers/models/brand_new_bert/configuration_brand_new_bert.py`. Ora potete finalmente cominciare il codice :). Il codice generato in `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` avrà sia la stessa architettura di BERT se é un modello encoder-only o BART se é encoder-decoder. A questo punto, ricordatevi cio che avete imparato all'inizio, riguardo agli aspetti teorici del modello: *In che maniera il modello che sto implmementando é diverso da BERT o BART?*. 
Implementare questi cambi spesso vuol dire cambiare il layer *self-attention*, l'ordine dei layer di normalizzazione e così via... Ancora una volta ripetiamo, é molto utile vedere architetture simili di modelli gia esistenti in Transformers per avere un'idea migliore su come implementare il modello. **Notate** che a questo punto non dovete avere subito un codice tutto corretto o pulito. Piuttosto, é consigliato cominciare con un codice poco pulito, con copia-incolla del codice originale in `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` fino a che non avrete tutto il codice necessario. In base alla nostra esperienza, é molto meglio aggiungere una prima bozza del codice richiesto e poi correggere e migliorare iterativamente. L'unica cosa essenziale che deve funzionare qui é la seguente instanza: ```python from transformers import BrandNewBertModel, BrandNewBertConfig model = BrandNewBertModel(BrandNewBertConfig()) ``` Questo comando creerà un modello con i parametri di default definiti in `BrandNewBergConfig()` e weights random. Questo garantisce che `init()` di tutte le componenti funzioni correttamente. **6. Scrivere uno script di conversione** Il prossimo step é scrivere uno script per convertire il checkpoint che avete usato per fare debug su *brand_new_berts* nella repo originale in un checkpoint per la nuova implementazione di *brand_new_bert* in 🤗 Transformers. Non é consigliato scrivere lo script di conversione da zero, ma piuttosto cercate e guardate script gia esistenti in 🤗 Transformers, così da trovarne uno simile al vostro modello. Di solito basta fare una copia di uno script gia esistente e adattarlo al vostro caso. Non esistate a chiedre al team di Hugging Face a riguardo. - Se state convertendo un modello da TensorFlow a PyTorch, un ottimo inizio é vedere [questo script di conversione per BERT](https://github.com/huggingface/transformers/blob/7acfa95afb8194f8f9c1f4d2c6028224dbed35a2/src/transformers/models/bert/modeling_bert.py#L91) - Se state convertendo un modello da PyTorch a PyTorch, [lo script di conversione di BART può esservi utile](https://github.com/huggingface/transformers/blob/main/src/transformers/models/bart/convert_bart_original_pytorch_checkpoint_to_pytorch.py) Qui di seguito spiegheremo come i modelli PyTorch salvano i weights per ogni layer e come i nomi dei layer sono definiti. In PyTorch, il nomde del layer é definito dal nome della class attribute che date al layer. Definiamo un modello dummy in PyTorch, chiamato `SimpleModel`: ```python from torch import nn class SimpleModel(nn.Module): def __init__(self): super().__init__() self.dense = nn.Linear(10, 10) self.intermediate = nn.Linear(10, 10) self.layer_norm = nn.LayerNorm(10) ``` Ora possiamo creare un'instanza di questa definizione di modo da inizializzare a random weights: `dense`, `intermediate`, `layer_norm`. Possiamo usare print per vedere l'architettura del modello: ```python model = SimpleModel() print(model) ``` Da cui si ottiene: ``` SimpleModel( (dense): Linear(in_features=10, out_features=10, bias=True) (intermediate): Linear(in_features=10, out_features=10, bias=True) (layer_norm): LayerNorm((10,), eps=1e-05, elementwise_affine=True) ) ``` Si può vedere come i nomi dei layers siano definiti dal nome della class attribute in PyTorch. 
I valori dei weights di uno specifico layer possono essere visualizzati: ```python print(model.dense.weight.data) ``` ad esempio: ``` tensor([[-0.0818, 0.2207, -0.0749, -0.0030, 0.0045, -0.1569, -0.1598, 0.0212, -0.2077, 0.2157], [ 0.1044, 0.0201, 0.0990, 0.2482, 0.3116, 0.2509, 0.2866, -0.2190, 0.2166, -0.0212], [-0.2000, 0.1107, -0.1999, -0.3119, 0.1559, 0.0993, 0.1776, -0.1950, -0.1023, -0.0447], [-0.0888, -0.1092, 0.2281, 0.0336, 0.1817, -0.0115, 0.2096, 0.1415, -0.1876, -0.2467], [ 0.2208, -0.2352, -0.1426, -0.2636, -0.2889, -0.2061, -0.2849, -0.0465, 0.2577, 0.0402], [ 0.1502, 0.2465, 0.2566, 0.0693, 0.2352, -0.0530, 0.1859, -0.0604, 0.2132, 0.1680], [ 0.1733, -0.2407, -0.1721, 0.1484, 0.0358, -0.0633, -0.0721, -0.0090, 0.2707, -0.2509], [-0.1173, 0.1561, 0.2945, 0.0595, -0.1996, 0.2988, -0.0802, 0.0407, 0.1829, -0.1568], [-0.1164, -0.2228, -0.0403, 0.0428, 0.1339, 0.0047, 0.1967, 0.2923, 0.0333, -0.0536], [-0.1492, -0.1616, 0.1057, 0.1950, -0.2807, -0.2710, -0.1586, 0.0739, 0.2220, 0.2358]]). ``` Nello script di conversione, dovreste riempire quei valori di inizializzazione random con gli stessi weights del corrispondente layer nel checkpoint. *Per esempio* ```python # retrieve matching layer weights, e.g. by # recursive algorithm layer_name = "dense" pretrained_weight = array_of_dense_layer model_pointer = getattr(model, "dense") model_pointer.weight.data = torch.from_numpy(pretrained_weight) ``` Così facendo, dovete verificare che ogni inizializzazione random di un peso del modello PyTorch e il suo corrispondente peso nel pretrained checkpoint siano esattamente gli stessi e uguali in **dimensione/shape e nome**. Per fare questo, é **necessario** aggiungere un `assert` per la dimensione/shape e nome: ```python assert ( model_pointer.weight.shape == pretrained_weight.shape ), f"Pointer shape of random weight {model_pointer.shape} and array shape of checkpoint weight {pretrained_weight.shape} mismatched" ``` Inoltre, dovrete fare il print sia dei nomi che dei weights per essere sicuri che siano gli stessi: ```python logger.info(f"Initialize PyTorch weight {layer_name} from {pretrained_weight.name}") ``` Se la dimensione o il nome non sono uguali, probabilmente avete sbagliato ad assegnare il peso nel checkpoint o nel layer costrutture di 🤗 Transformers. Una dimensione sbagliata può essere dovuta ad un errore nei parameteri in `BrandNewBertConfig()`. Tuttavia, può essere anche che l'implementazione del layer in PyTorch richieda di fare una transposizione della matrice dei weights. Infine, controllate **tutti** che tutti i weights inizializzati e fate print di tutti i weights del checkpoint che non sono stati usati per l'inizializzazione, di modo da essere sicuri che il modello sia correttamente convertito. É normale che ci siano errori nel test di conversione, fai per un errore in `BrandNewBertConfig()`, o un errore nell'architettura in 🤗 Transformers, o un bug in `init()`. Questo step dev'essere fatto tramite iterazioni fino a che non si raggiungano gli stessi valori per i weights. Una volta che il checkpoint é stato correttamente caricato in 🤗 Transformers, potete salvare il modello in una cartella di vostra scelta `/path/to/converted/checkpoint/folder` che contenga sia `pytorch_model.bin` che `config.json`: ```python model.save_pretrained("/path/to/converted/checkpoint/folder") ``` **7. Implementare il forward pass** Una volta che i weights pretrained sono stati correttamente caricati in 🤗 Transformers, dovrete assicurarvi che il forward pass sia correttamente implementato. 
[Qui](#3-4-provare-un-pretrained-checkpoint-usando-la-repo-originale), avete give creato e provato uno script che testi il forward pass del modello usando la repo originaria. Ora dovrete fare lo stesso con uno script analogo usando l'implementazione in 🤗 Transformers anziché l'originale. Piu o meno lo script dovrebbe essere: ```python model = BrandNewBertModel.from_pretrained("/path/to/converted/checkpoint/folder") input_ids = [0, 4, 4, 3, 2, 4, 1, 7, 19] output = model(input_ids).last_hidden_states ``` Di solito l'output da 🤗 Transformers non é uguale uguale all'output originario, sopratto la prima volta. Non vi abbattete - é normale! Prima di tutto assicuratevi che non ci siano errori o che non vengano segnalati degli errori nella forward pass. Spesso capita che ci siano dimensioni sbagliate o data type sbagliati, *ad esempio* `torch.long` anziche `torch.float32`. Non esistate a chiedere al team Hugging Face! Nella parte finale assicuratevi che l'implementazione 🤗 Transformers funzioni correttamente cosi da testare che gli output siano equivalenti a una precisione di `1e-3`. Controllate che `outputs.shape` siano le stesse tra 🤗 Transformers e l'implementazione originaria. Poi, controllate che i valori in output siano identici. Questa é sicuramente la parte più difficile, qui una serie di errori comuni quando gli output non sono uguali: - Alcuni layers non sono stati aggiunti, *ad esempio* un *activation* layer non é stato aggiunto, o ci si é scordati di una connessione - La matrice del word embedding non é stata ripareggiata - Ci sono degli embeddings posizionali sbagliati perché l'implementazione originaria ha un offset - Il dropout é in azione durante il forward pass. Per sistemare questo errore controllate che *model.training = False* e che il dropout non sia stato attivato nel forward pass, * per esempio * passate *self.training* a [PyTorch's functional dropout](https://pytorch.org/docs/stable/nn.functional.html?highlight=dropout#torch.nn.functional.dropout) La miglior maniera per sistemare il problema é di vedere all'implementazione originaria del forward pass e in 🤗 Transformers fianco a fianco e vedere se ci sono delle differenze. In teoria, con debug e print degli output intermedie di entrambe le implementazioni nel forward pass nell'esatta posizione del network dovrebbe aiutarvi a vedere dove ci sono differenze tra i due frameworks. Come prima mossa controllate che `input_ids` siano identici in entrambi gli scripts. Da lì andate fino all'ultimo layer. Potrete notare una differenza tra le due implementazioni a quel punto. Una volta che lo stesso output é stato ragguingi, verificate gli output con `torch.allclose(original_output, output, atol=1e-3)`. A questo punto se é tutto a posto: complimenti! Le parti seguenti saranno una passeggiata 😊. **8. Aggiungere i test necessari per il modello** A questo punto avete aggiunto con successo il vostro nuovo modello. Tuttavia, é molto probabile che il modello non sia del tutto ok con il design richiesto. Per essere sicuri che l'implementazione sia consona e compatibile con 🤗 Transformers é necessario implementare dei tests. Il Cookiecutter dovrebbe fornire automaticamente dei file per test per il vostro modello, di solito nella folder `tests/test_modeling_brand_new_bert.py`. 
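Giusto per dare un'idea, una versione minimale di uno di questi test d'integrazione potrebbe somigliare allo sketch seguente (i nomi delle classi seguono l'esempio *brand_new_bert*, il percorso e i valori attesi sono puramente fittizi e vanno sostituiti con quelli ottenuti dall'implementazione originale):

```python
import unittest

import torch

from transformers import BrandNewBertModel


class BrandNewBertModelIntegrationTests(unittest.TestCase):
    def test_inference(self):
        # percorso ipotetico del checkpoint convertito in precedenza
        model = BrandNewBertModel.from_pretrained("/path/to/converted/checkpoint/folder")
        model.eval()

        input_ids = torch.tensor([[0, 4, 4, 3, 2, 4, 1, 7, 19]])
        with torch.no_grad():
            output = model(input_ids)[0]

        # slice di valori attesi puramente di esempio, da sostituire con
        # l'output reale dell'implementazione originale
        expected_slice = torch.tensor([[-0.0548, 0.1056, -0.1284]])
        self.assertTrue(torch.allclose(output[0, :1, :3], expected_slice, atol=1e-3))
```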
Provate questo per verificare l'ok nei test piu comuni: ```bash pytest tests/test_modeling_brand_new_bert.py ``` Una volta sistemati i test comuni, bisogna assicurarsi che il vostro lavoro sia correttamente testato cosicchè: - a) La community puo capire in maniera semplice il vostro lavoro controllando tests specifici del modello *brand_new_bert*, - b) Implementazioni future del vostro modello non rompano alcune feature importante del modello. Per prima cosa agguingete dei test d'integrazione. Questi sono essenziali perche fanno la stessa funzione degli scripts di debug usati precedentemente. Un template per questi tests esiste gia nel Cookiecutter ed é sotto il nome di `BrandNewBertModelIntegrationTests`, voi dovrete solo completarlo. Una volta che questi tests sono OK, provate: ```bash RUN_SLOW=1 pytest -sv tests/test_modeling_brand_new_bert.py::BrandNewBertModelIntegrationTests ``` <Tip> Nel caso siate su Windows, sostituite `RUN_SLOW=1` con `SET RUN_SLOW=1` </Tip> Di seguito, tutte le features che sono utili e necessarire per *brand_new_bert* devono essere testate in test separati, contenuti in `BrandNewBertModelTester`/ `BrandNewBertModelTest`. spesso la gente si scorda questi test, ma ricordate che sono utili per: - Aiuta gli utenti a capire il vostro codice meglio, richiamando l'attenzione su queste nuove features - Developers e contributors futuri potranno velocemente testare nuove implementazioni del modello testanto questi casi speciali. **9. Implementare il tokenizer** A questo punto avremo bisogno un tokenizer per *brand_new_bert*. Di solito il tokenizer é uguale ad altri modelli in 🤗 Transformers. É importante che troviate il file con il tokenizer originale e che lo carichiate in 🤗 Transformers. Per controllare che il tokenizer funzioni in modo corretto, create uno script nella repo originaria che riceva come input una stringa e ritorni gli `input_ids`. Piu o meno questo potrebbe essere il codice: ```python input_str = "This is a long example input string containing special characters .$?-, numbers 2872 234 12 and words." model = BrandNewBertModel.load_pretrained_checkpoint("/path/to/checkpoint/") input_ids = model.tokenize(input_str) ``` Potrebbe richiedere un po' di tempo, ma guardate ancora alla repo originaria per trovare la funzione corretta del tokenizer. A volte capita di dover riscrivere il tokenizer nella repo originaria, di modo da avere come output gli `input_ids`. A quel punto uno script analogo é necessario in 🤗 Transformers: ```python from transformers import BrandNewBertTokenizer input_str = "This is a long example input string containing special characters .$?-, numbers 2872 234 12 and words." tokenizer = BrandNewBertTokenizer.from_pretrained("/path/to/tokenizer/folder/") input_ids = tokenizer(input_str).input_ids ``` Una volta che `input_ids` sono uguali, bisogna aggiungere un test per il tokenizer. Il file test per tokenizer di *brand_new_brand* dovrebbe avere un paio di hard-coded test d'integrazione. **10. Test end-to-end** Ora che avete il tokenizer, dovrete aggiungere dei test d'integrazione per l'intero workflow in `tests/test_modeling_brand_new_bert.py` in 🤗 Transformer. Questi test devono mostrare che un significante campione text-to-text funzioni come ci si aspetta nell'implementazione di 🤗 Transformers. *Per esempio* potreste usare dei source-to-target-translation, o un sommario di un articolo, o un domanda-risposta e cosi via. 
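A titolo puramente indicativo, un test end-to-end di questo tipo potrebbe assomigliare allo sketch qui sotto (il nome della classe `BrandNewBertForConditionalGeneration`, i percorsi e il testo di esempio sono ipotetici e vanno adattati al vostro modello):

```python
from transformers import BrandNewBertForConditionalGeneration, BrandNewBertTokenizer

# percorsi ipotetici verso il checkpoint convertito e il tokenizer
model = BrandNewBertForConditionalGeneration.from_pretrained("/path/to/converted/checkpoint/folder")
tokenizer = BrandNewBertTokenizer.from_pretrained("/path/to/tokenizer/folder/")

src_text = "This is a long example input string containing special characters .$?-, numbers 2872 234 12 and words."
input_ids = tokenizer(src_text, return_tensors="pt").input_ids

# genera l'output, da confrontare con quello prodotto dall'implementazione originale
generated_ids = model.generate(input_ids, max_length=40)
print(tokenizer.decode(generated_ids[0], skip_special_tokens=True))
```

L'output decodificato va poi confrontato (hard-coded nel test) con quello prodotto dall'implementazione originale.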
Se nessuno dei checkpoints é stato ultra parametrizzato per task simili, allora i tests per il modello sono piu che sufficienti. Nello step finale dovete assicurarvi che il modello sia totalmente funzionale, e consigliamo anche di provare a testare su GPU. Puo succedere che ci si scordi un `.to(self.device)` ad esempio. Se non avete accesso a GPU, il team Hugging Face puo provvedere a testare questo aspetto per voi. **11. Aggiungere una Docstring** Siete quasi alla fine! L'ultima cosa rimasta é avere una bella docstring e una pagina doc. Il Cookiecutter dovrebbe provvedere già un template chiamato `docs/source/model_doc/brand_new_bert.rst`, che dovrete compilare. La prima cosa che un utente farà per usare il vostro modello sarà dare una bella lettura al doc. Quindi proponete una documentazione chiara e concisa. É molto utile per la community avere anche delle *Tips* per mostrare come il modello puo' essere usato. Non esitate a chiedere a Hugging Face riguardo alle docstirng. Quindi, assicuratevi che la docstring sia stata aggiunta a `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py`. Assicuratevi che la docstring sia corretta e che includa tutti i necessari input e output. Abbiamo una guida dettagliata per scrivere la documentazione e docstring. **Rifattorizzare il codice** Perfetto! Ora che abbiamo tutto per *brand_new_bert* controllate che lo stile del codice sia ok: ```bash make style ``` E che il codice passi i quality check: ```bash make quality ``` A volte capita che manchino delle informazioninella docstring o alcuni nomi sbagliati, questo farà fallire i tests sopra. Ripetiamo: chiedete pure a Hugging Face, saremo lieti di aiutarvi. Per ultimo, fare del refactoring del codice una volta che é stato creato. Avete finito con il codice, congratulazioni! 🎉 Siete fantasticiiiiiii! 😎 **12. Caricare il modello sul model hub** In questa ultima parte dovrete convertire e caricare il modello, con tutti i checkpoints, nel model hub e aggiungere una model card per ogni checkpoint caricato. Leggete la nostra guida [Model sharing and uploading Page](model_sharing) per avere familiarità con l'hub. Di solito in questa parte lavorate a fianco di Hugging face per decidere un nome che sia ok per ogni checkpoint, per ottenere i permessi necessari per caricare il modello nell'organizzazione dell'autore di *brand_new_bert*. Il metodo `push_to_hub`, presente in tutti i modelli `transformers`, é una maniera rapida e indolore per caricare il vostro checkpoint sull'hub: ```python brand_new_bert.push_to_hub( repo_path_or_name="brand_new_bert", # Uncomment the following line to push to an organization # organization="<ORGANIZATION>", commit_message="Add model", use_temp_dir=True, ) ``` Vale la pena spendere un po' di tempo per creare una model card ad-hoc per ogni checkpoint. Le model cards dovrebbero suggerire le caratteristiche specifiche del checkpoint, *per esempio* su che dataset il checkpoint é stato pretrained o fine-tuned. O che su che genere di task il modello lavoro? E anche buona pratica includere del codice su come usare il modello correttamente. **13. (Opzionale) Aggiungere un notebook** É molto utile aggiungere un notebook, che dimostri in dettaglio come *brand_new_bert* si utilizzi per fare inferenza e/o fine-tuned su specifiche task. Non é una cosa obbligatoria da avere nella vostra PR, ma é molto utile per la community. **14. Sottomettere la PR** L'ultimissimo step! Ovvero il merge della PR nel main. 
Di solito il team Hugging Face a questo punto vi avrà già aiutato, ma è comunque utile prendersi un po' di tempo per ripulire la descrizione e i commenti nel codice.

### Condividete il vostro lavoro!!

È ora il momento di ottenere un po' di riconoscimento dalla community per il vostro lavoro! Caricare e implementare un nuovo modello è un grandissimo contributo per Transformers e per l'intera community NLP. Il codice e i modelli pre-trained convertiti saranno sicuramente utilizzati da centinaia o migliaia di sviluppatori e ricercatori. Siate fieri e orgogliosi di condividere il vostro traguardo con l'intera community :)

**Avete creato un altro modello che è super facile da usare per tutti quanti nella community! 🤯**
transformers/docs/source/it/add_new_model.md/0
{ "file_path": "transformers/docs/source/it/add_new_model.md", "repo_id": "transformers", "token_count": 17333 }
267
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# Inferenza Efficiente su GPU Multiple

Questo documento contiene informazioni su come fare inferenza in maniera efficiente su GPU multiple.

<Tip>

Nota: Un setup con GPU multiple può utilizzare la maggior parte delle strategie descritte nella [sezione con GPU singola](./perf_infer_gpu_one). Tuttavia, è necessario conoscere delle tecniche semplici che possono essere utilizzate per un risultato migliore.

</Tip>

## `BetterTransformer` per inferenza più rapida

Abbiamo recentemente integrato `BetterTransformer` per inferenza più rapida su multi-GPU per modelli su testo, immagini e audio. Controlla il documento con queste integrazioni [qui](https://huggingface.co/docs/optimum/bettertransformer/overview) per maggiori dettagli.
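A titolo puramente indicativo, la conversione di un modello potrebbe assomigliare allo sketch seguente; l'API esatta e l'elenco dei modelli supportati sono descritti nella documentazione di Optimum linkata sopra:

```python
from transformers import AutoModel
from optimum.bettertransformer import BetterTransformer

# carica il modello distribuendolo sulle GPU disponibili (richiede accelerate)
model = AutoModel.from_pretrained("bert-base-uncased", device_map="auto")

# converte i moduli supportati nella loro versione BetterTransformer
model = BetterTransformer.transform(model)
```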
transformers/docs/source/it/perf_infer_gpu_many.md/0
{ "file_path": "transformers/docs/source/it/perf_infer_gpu_many.md", "repo_id": "transformers", "token_count": 420 }
268
<!-- Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the ⚠️ このファイルはMarkdown形式ですが、特定の文法が含まれており、通常のMarkdownビューアーでは正しく表示されない場合があります。 --> # How to add a model to 🤗 Transformers? 🤗 Transformersライブラリは、コミュニティの貢献者のおかげで新しいモデルを提供できることがよくあります。 しかし、これは難しいプロジェクトであり、🤗 Transformersライブラリと実装するモデルについての深い知識が必要です。 Hugging Faceでは、コミュニティの多くの人々に積極的にモデルを追加する力を与えようと努力しており、 このガイドをまとめて、PyTorchモデルを追加するプロセスを説明します([PyTorchがインストールされていることを確認してください](https://pytorch.org/get-started/locally/))。 <Tip> TensorFlowモデルを実装する興味がある場合は、[🤗 TransformersモデルをTensorFlowに変換する方法](add_tensorflow_model)ガイドを参照してみてください! </Tip> この過程で、以下のことを学びます: - オープンソースのベストプラクティスに関する洞察 - 最も人気のある深層学習ライブラリの設計原則を理解する - 大規模なモデルを効率的にテストする方法を学ぶ - `black`、`ruff`、および`make fix-copies`などのPythonユーティリティを統合して、クリーンで読みやすいコードを確保する方法を学ぶ Hugging Faceチームのメンバーがサポートを提供するので、一人ぼっちになることはありません。 🤗 ❤️ さあ、始めましょう!🤗 Transformersで見たいモデルについての[New model addition](https://github.com/huggingface/transformers/issues/new?assignees=&labels=New+model&template=new-model-addition.yml)のイシューを開いてください。 特定のモデルを提供することに特にこだわりがない場合、[New model label](https://github.com/huggingface/transformers/labels/New%20model)で未割り当てのモデルリクエストがあるかどうかを確認して、それに取り組むことができます。 新しいモデルリクエストを開いたら、最初のステップは🤗 Transformersをよく理解することです! ## General overview of 🤗 Transformers まず、🤗 Transformersの一般的な概要を把握する必要があります。🤗 Transformersは非常に意見が分かれるライブラリですので、 ライブラリの哲学や設計選択について同意できない可能性があります。ただし、私たちの経験から、ライブラリの基本的な設計選択と哲学は、 🤗 Transformersを効率的にスケーリングし、適切なレベルで保守コストを抑えるために不可欠です。 ライブラリの理解を深めるための良い出発点は、[哲学のドキュメント](philosophy)を読むことです。 私たちの作業方法の結果、すべてのモデルに適用しようとするいくつかの選択肢があります: - 一般的に、抽象化よりも構成が優先されます。 - コードの重複は、読みやすさやアクセス可能性を大幅に向上させる場合、必ずしも悪いわけではありません。 - モデルファイルはできるだけ自己完結的であるべきで、特定のモデルのコードを読む際には、理想的には該当する`modeling_....py`ファイルのみを見る必要があります。 私たちの意見では、このライブラリのコードは単なる製品を提供する手段だけでなく、*例えば、推論のためにBERTを使用する能力*などの製品そのもの. 
### Overview of models モデルを正常に追加するためには、モデルとその設定、[`PreTrainedModel`]、および[`PretrainedConfig`]の相互作用を理解することが重要です。 例示的な目的で、🤗 Transformersに追加するモデルを「BrandNewBert」と呼びます。 以下をご覧ください: <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers_overview.png"/> ご覧のように、🤗 Transformersでは継承を使用していますが、抽象化のレベルを最小限に保っています。 ライブラリ内のどのモデルにも、抽象化のレベルが2つを超えることはありません。 `BrandNewBertModel` は `BrandNewBertPreTrainedModel` を継承し、さらに[`PreTrainedModel`]を継承しています。 これだけです。 一般的なルールとして、新しいモデルは[`PreTrainedModel`]にのみ依存するようにしたいと考えています。 すべての新しいモデルに自動的に提供される重要な機能は、[`~PreTrainedModel.from_pretrained`]および [`~PreTrainedModel.save_pretrained`]です。 これらはシリアライゼーションとデシリアライゼーションに使用されます。 `BrandNewBertModel.forward`などの他の重要な機能は、新しい「modeling_brand_new_bert.py」スクリプトで完全に定義されるべきです。 次に、特定のヘッドレイヤーを持つモデル(たとえば `BrandNewBertForMaskedLM` )が `BrandNewBertModel` を継承するのではなく、 抽象化のレベルを低く保つために、そのフォワードパスで `BrandNewBertModel` を呼び出すコンポーネントとして使用されるようにしたいと考えています。 新しいモデルには常に `BrandNewBertConfig` という設定クラスが必要です。この設定は常に[`PreTrainedModel`]の属性として保存され、 したがって、`BrandNewBertPreTrainedModel`から継承するすべてのクラスで`config`属性を介してアクセスできます。 ```python model = BrandNewBertModel.from_pretrained("brandy/brand_new_bert") model.config # model has access to its config ``` モデルと同様に、設定は[`PretrainedConfig`]から基本的なシリアル化および逆シリアル化の機能を継承しています。注意すべきは、設定とモデルは常に2つの異なる形式にシリアル化されることです - モデルは*pytorch_model.bin*ファイルに、設定は*config.json*ファイルにシリアル化されます。[`~PreTrainedModel.save_pretrained`]を呼び出すと、自動的に[`~PretrainedConfig.save_pretrained`]も呼び出され、モデルと設定の両方が保存されます。 ### Code style 新しいモデルをコーディングする際には、Transformersは意見があるライブラリであり、コードの書き方に関していくつかの独自の考え方があります :-) 1. モデルのフォワードパスはモデリングファイルに完全に記述され、ライブラリ内の他のモデルとは完全に独立している必要があります。他のモデルからブロックを再利用したい場合、コードをコピーしてトップに`# Copied from`コメントを付けて貼り付けます(良い例は[こちら](https://github.com/huggingface/transformers/blob/v4.17.0/src/transformers/models/roberta/modeling_roberta.py#L160)、コピーに関する詳細なドキュメンテーションは[ここ](pr_checks#check-copies)を参照してください)。 2. コードは完全に理解可能でなければなりません。これは記述的な変数名を選択し、省略形を避けるべきであることを意味します。例えば、`act`ではなく`activation`が好まれます。1文字の変数名は、forループ内のインデックスでない限り、強く非推奨です。 3. より一般的に、魔法のような短いコードよりも長くて明示的なコードを好みます。 4. PyTorchでは`nn.Sequential`をサブクラス化せずに、`nn.Module`をサブクラス化し、フォワードパスを記述し、コードを使用する他の人が簡単にデバッグできるようにします。プリントステートメントやブレークポイントを追加してデバッグできるようにします。 5. 関数のシグネチャは型アノテーションを付けるべきです。その他の部分に関しては、型アノテーションよりも良い変数名が読みやすく理解しやすいことがあります。 ### Overview of tokenizers まだ完了していません :-( このセクションは近日中に追加されます! ## Step-by-step recipe to add a model to 🤗 Transformers モデルを追加する方法は人それぞれ異なるため、他のコントリビューターが🤗 Transformersにモデルを追加する際の要約を確認することが非常に役立つ場合があります。以下は、他のコントリビューターが🤗 Transformersにモデルをポートする際のコミュニティブログ投稿のリストです。 1. [GPT2モデルのポーティング](https://medium.com/huggingface/from-tensorflow-to-pytorch-265f40ef2a28) by [Thomas](https://huggingface.co/thomwolf) 2. 
[WMT19 MTモデルのポーティング](https://huggingface.co/blog/porting-fsmt) by [Stas](https://huggingface.co/stas) 経験から言えることは、モデルを追加する際に最も重要なことは次のようになります: - 車輪の再発明をしないでください!新しい🤗 Transformersモデルのために追加するコードのほとんどはすでに🤗 Transformers内のどこかに存在しています。類似した既存のモデルやトークナイザを見つけるために、いくつかの時間をかけて探すことが重要です。[grep](https://www.gnu.org/software/grep/)と[rg](https://github.com/BurntSushi/ripgrep)はあなたの友達です。モデルのトークナイザは1つのモデル実装に基づいているかもしれませんが、モデルのモデリングコードは別の実装に基づいていることがあることに注意してください。例えば、FSMTのモデリングコードはBARTに基づいており、FSMTのトークナイザコードはXLMに基づいています。 - これは科学的な課題よりもエンジニアリングの課題です。モデルの論文の理論的な側面をすべて理解しようとするよりも、効率的なデバッグ環境を作成するために時間を費やすべきです。 - 行き詰まった場合は助けを求めてください!モデルは🤗 Transformersのコアコンポーネントであり、Hugging Faceではモデルを追加するための各ステップでお手伝いするのを喜んでいます。進行がないことに気付いた場合は、進展していないことを気にしないでください。 以下では、🤗 Transformersにモデルをポートする際に最も役立つと考えられる一般的なレシピを提供しようとしています。 次のリストは、モデルを追加するために行う必要があるすべてのことの要約であり、To-Doリストとして使用できます: - ☐ (オプション)モデルの理論的な側面を理解しました - ☐ 🤗 Transformersの開発環境を準備しました - ☐ オリジナルのリポジトリのデバッグ環境をセットアップしました - ☐ `forward()` パスをオリジナルのリポジトリとチェックポイントで正常に実行するスクリプトを作成しました - ☐ モデルの骨格を🤗 Transformersに正常に追加しました - ☐ オリジナルのチェックポイントを🤗 Transformersのチェックポイントに正常に変換しました - ☐ 🤗 Transformersで実行される `forward()` パスを正常に実行し、オリジナルのチェックポイントと同一の出力を得ました - ☐ 🤗 Transformersでのモデルテストを完了しました - ☐ 🤗 Transformersにトークナイザを正常に追加しました - ☐ エンドツーエンドの統合テストを実行しました - ☐ ドキュメントを完成させました - ☐ モデルのウェイトをHubにアップロードしました - ☐ プルリクエストを提出しました - ☐ (オプション)デモノートブックを追加しました まず、通常、`BrandNewBert`の理論的な理解を深めることをお勧めします。 ただし、もしモデルの理論的な側面を「実務中に理解する」方が好ましい場合、`BrandNewBert`のコードベースに直接アクセスするのも問題ありません。 このオプションは、エンジニアリングのスキルが理論的なスキルよりも優れている場合、 `BrandNewBert`の論文を理解するのに苦労している場合、または科学的な論文を読むよりもプログラミングを楽しんでいる場合に適しています。 ### 1. (Optional) Theoretical aspects of BrandNewBert BrandNewBertの論文がある場合、その説明を読むための時間を取るべきです。論文の中には理解が難しい部分があるかもしれません。 その場合でも心配しないでください。目標は論文の深い理論的理解を得ることではなく、 🤗 Transformersでモデルを効果的に再実装するために必要な情報を抽出することです。 ただし、理論的な側面にあまり多くの時間をかける必要はありません。代わりに、実践的な側面に焦点を当てましょう。具体的には次の点です: - *brand_new_bert*はどの種類のモデルですか? BERTのようなエンコーダーのみのモデルですか? GPT2のようなデコーダーのみのモデルですか? BARTのようなエンコーダー-デコーダーモデルですか? [model_summary](model_summary)を参照して、これらの違いについて詳しく知りたい場合があります。 - *brand_new_bert*の応用分野は何ですか? テキスト分類ですか? テキスト生成ですか? Seq2Seqタスク、例えば要約ですか? - モデルをBERT/GPT-2/BARTとは異なるものにする新しい機能は何ですか? - 既存の[🤗 Transformersモデル](https://huggingface.co/transformers/#contents)の中で*brand_new_bert*に最も似ているモデルはどれですか? - 使用されているトークナイザの種類は何ですか? SentencePieceトークナイザですか? WordPieceトークナイザですか? BERTやBARTで使用されているトークナイザと同じですか? モデルのアーキテクチャの良い概要を得たと感じたら、Hugging Faceチームに質問を送ることができます。 これにはモデルのアーキテクチャ、注意層などに関する質問が含まれるかもしれません。 私たちは喜んでお手伝いします。 ### 2. Next prepare your environment 1. リポジトリのページで「Fork」ボタンをクリックして、[リポジトリ](https://github.com/huggingface/transformers)をフォークします。 これにより、コードのコピーがGitHubユーザーアカウントの下に作成されます。 2. ローカルディスクにある`transformers`フォークをクローンし、ベースリポジトリをリモートとして追加します: ```bash git clone https://github.com/[your Github handle]/transformers.git cd transformers git remote add upstream https://github.com/huggingface/transformers.git ``` ```bash python -m venv .env source .env/bin/activate pip install -e ".[dev]" ``` 3. 開発環境をセットアップするために、次のコマンドを実行してください: ```bash python -m venv .env source .env/bin/activate pip install -e ".[dev]" ``` お使いのOSに応じて、およびTransformersのオプションの依存関係の数が増えているため、このコマンドでエラーが発生する可能性があります。 その場合は、作業しているDeep Learningフレームワーク(PyTorch、TensorFlow、および/またはFlax)をインストールし、次の手順を実行してください: ```bash pip install -e ".[quality]" ``` これはほとんどのユースケースには十分であるはずです。その後、親ディレクトリに戻ることができます。 ```bash cd .. ``` 4. Transformersに*brand_new_bert*のPyTorchバージョンを追加することをお勧めします。PyTorchをインストールするには、 https://pytorch.org/get-started/locally/ の指示に従ってください。 **注意:** CUDAをインストールする必要はありません。新しいモデルをCPUで動作させることで十分です。 5. 
*brand_new_bert*を移植するには、元のリポジトリへのアクセスも必要です。 ```bash git clone https://github.com/org_that_created_brand_new_bert_org/brand_new_bert.git cd brand_new_bert pip install -e . ``` *brand_new_bert*を🤗 Transformersにポートするための開発環境を設定しました。 ### 3.-4. Run a pretrained checkpoint using the original repository 最初に、オリジナルの*brand_new_bert*リポジトリで作業します。通常、オリジナルの実装は非常に「研究的」であり、ドキュメンテーションが不足していたり、コードが理解しにくいことがあります。しかし、これが*brand_new_bert*を再実装する動機となるべきです。Hugging Faceでは、主要な目標の1つが、動作するモデルを取り、それをできるだけ**アクセス可能でユーザーフレンドリーで美しい**ものに書き直すことです。これは、🤗 Transformersにモデルを再実装する最も重要な動機です - 複雑な新しいNLP技術を**誰にでも**アクセス可能にしようとする試みです。 まず、オリジナルのリポジトリに入り込むことから始めるべきです。 公式の事前学習済みモデルをオリジナルのリポジトリで正常に実行することは、通常、**最も困難な**ステップです。 私たちの経験から、オリジナルのコードベースに慣れるのに時間をかけることが非常に重要です。以下のことを理解する必要があります: - 事前学習済みの重みをどこで見つけるか? - 対応するモデルに事前学習済みの重みをロードする方法は? - モデルから独立してトークナイザを実行する方法は? - 1つのフォワードパスを追跡して、単純なフォワードパスに必要なクラスと関数がわかるようにします。通常、これらの関数だけを再実装する必要があります。 - モデルの重要なコンポーネントを特定できること:モデルのクラスはどこにありますか?モデルのサブクラス、*例* EncoderModel、DecoderModelがありますか?自己注意レイヤーはどこにありますか?複数の異なる注意レイヤー、*例* *自己注意*、*クロスアテンション*などが存在しますか? - オリジナルのリポジトリの環境でモデルをデバッグする方法は?*print*ステートメントを追加する必要があるか、*ipdb*のような対話型デバッガを使用できるか、PyCharmのような効率的なIDEを使用してモデルをデバッグする必要がありますか? 重要なのは、ポーティングプロセスを開始する前に、オリジナルのリポジトリでコードを**効率的に**デバッグできることです!また、これはオープンソースライブラリで作業していることを覚えておいてください。オリジナルのリポジトリでコードを調べる誰かを歓迎するために、問題をオープンにしたり、プルリクエストを送信したりすることをためらわないでください。このリポジトリのメンテナーは、彼らのコードを調べてくれる人に対して非常に喜んでいる可能性が高いです! この段階では、オリジナルのモデルのデバッグにどのような環境と戦略を使用するかは、あなた次第です。最初にオリジナルのリポジトリに関するコードをデバッグできることが非常に重要です。また、GPU環境をセットアップすることはお勧めしません。まず、CPU上で作業し、モデルがすでに🤗 Transformersに正常にポートされていることを確認します。最後に、モデルがGPU上でも期待通りに動作するかどうかを検証する必要があります。 一般的に、オリジナルのモデルを実行するための2つのデバッグ環境があります: - [Jupyter notebooks](https://jupyter.org/) / [google colab](https://colab.research.google.com/notebooks/intro.ipynb) - ローカルなPythonスクリプト。 Jupyterノートブックは、セルごとに実行できるため、論理的なコンポーネントをより分割し、中間結果を保存できるため、デバッグサイクルが速くなるという利点があります。また、ノートブックは他の共同作業者と簡単に共有できることが多く、Hugging Faceチームに助けを求める場合に非常に役立つ場合があります。Jupyterノートブックに精通している場合、それ ```python model = BrandNewBertModel.load_pretrained_checkpoint("/path/to/checkpoint/") input_ids = [0, 4, 5, 2, 3, 7, 9] # vector of input ids original_output = model.predict(input_ids) ``` デバッグ戦略については、通常、いくつかの選択肢があります: - 元のモデルを多くの小さなテスト可能なコンポーネントに分解し、それぞれに対して前方パスを実行して検証します - 元のモデルを元のトークナイザと元のモデルにのみ分解し、それらに対して前方パスを実行し、検証のために中間のプリントステートメントまたはブレークポイントを使用します 再度、どの戦略を選択するかはあなた次第です。元のコードベースに依存することが多く、元のコードベースに応じて一方または他方が有利なことがあります。 元のコードベースがモデルを小さなサブコンポーネントに分解できる場合、*例えば*元のコードベースが簡単にイーガーモードで実行できる場合、それを行う価値が通常あります。最初からより難しい方法を選択することにはいくつかの重要な利点があります: - 後で元のモデルを🤗 Transformersの実装と比較する際に、各コンポーネントが対応する🤗 Transformers実装のコンポーネントと一致することを自動的に検証できるため、視覚的な比較に依存せずに済みます - 大きな問題を小さな問題に分解する、つまり個々のコンポーネントのみをポーティングする問題に分割するのに役立ち、作業を構造化するのに役立ちます - モデルを論理的な意味のあるコンポーネントに分割することで、モデルの設計をよりよく理解しやすくし、モデルをよりよく理解するのに役立ちます - 後で、コンポーネントごとのテストを行うことで、コードを変更し続ける際にリグレッションが発生しないことを確認するのに役立ちます [Lysandreの](https://gist.github.com/LysandreJik/db4c948f6b4483960de5cbac598ad4ed) ELECTRAの統合チェックは、これがどのように行われるかの良い例です。 ただし、元のコードベースが非常に複雑で、中間コンポーネントをコンパイルモードで実行することしか許可しない場合、モデルを小さなテスト可能なサブコンポーネントに分解することが時間がかかりすぎるか、不可能であることがあります。 良い例は[T5のMeshTensorFlow](https://github.com/tensorflow/mesh/tree/master/mesh_tensorflow)ライブラリであり、非常に複雑でモデルをサブコンポーネントに分解する簡単な方法を提供しないことがあります。このようなライブラリでは、通常、プリントステートメントを検証することに依存します。 どの戦略を選択しても、推奨される手順は通常同じで、最初のレイヤーからデバッグを開始し、最後のレイヤーからデバッグを行うべきです。 通常、以下の順序で次のレイヤーからの出力を取得することをお勧めします: 1. モデルに渡された入力IDを取得する 2. 単語の埋め込みを取得する 3. 最初のTransformerレイヤーの入力を取得する 4. 最初のTransformerレイヤーの出力を取得する 5. 次のn - 1つのTransformerレイヤーの出力を取得する 6. 
BrandNewBertモデル全体の出力を取得する 入力IDは整数の配列である必要があり、*例:* `input_ids = [0, 4, 4, 3, 2, 4, 1, 7, 19]` のようになります。 以下のレイヤーの出力は多次元の浮動小数点配列であることが多く、次のようになることがあります: ``` [[ [-0.1465, -0.6501, 0.1993, ..., 0.1451, 0.3430, 0.6024], [-0.4417, -0.5920, 0.3450, ..., -0.3062, 0.6182, 0.7132], [-0.5009, -0.7122, 0.4548, ..., -0.3662, 0.6091, 0.7648], ..., [-0.5613, -0.6332, 0.4324, ..., -0.3792, 0.7372, 0.9288], [-0.5416, -0.6345, 0.4180, ..., -0.3564, 0.6992, 0.9191], [-0.5334, -0.6403, 0.4271, ..., -0.3339, 0.6533, 0.8694]]], ``` 🤗 Transformersに追加されるすべてのモデルは、統合テストを数回合格することが期待されており、元のモデルと🤗 Transformersで再実装されたバージョンが、0.001の精度までまったく同じ出力を提供する必要があります。 異なるライブラリフレームワークで同じモデルを書いた場合、わずかに異なる出力を返すことが正常であるため、誤差許容値として1e-3(0.001)を受け入れています。モデルがほぼ同じ出力を返すだけでは不十分で、ほぼ同一である必要があります。そのため、🤗 Transformersバージョンの中間出力を元の*brand_new_bert*の実装の中間出力と複数回にわたって比較することになるでしょう。その際、元のリポジトリの**効率的な**デバッグ環境が非常に重要です。以下は、デバッグ環境をできるだけ効率的にするためのアドバイスです。 - 中間結果をデバッグする最適な方法を見つける。元のリポジトリはPyTorchで書かれていますか?その場合、元のモデルをより小さなサブコンポーネントに分解して中間値を取得する長いスクリプトを書くことがおそらく適切です。元のリポジトリがTensorflow 1で書かれている場合、[tf.print](https://www.tensorflow.org/api_docs/python/tf/print)などのTensorFlowのプリント操作を使用して中間値を出力する必要があるかもしれません。元のリポジトリがJaxで書かれている場合、フォワードパスの実行時にモデルが**jittedされていない**ことを確認してください。例:[このリンク](https://github.com/google/jax/issues/196)をチェック。 - 使用可能な最小の事前学習済みチェックポイントを使用します。チェックポイントが小さいほど、デバッグサイクルが速くなります。事前学習済みモデルがフォワードパスに10秒以上かかる場合、効率的ではありません。非常に大きなチェックポイントしか利用できない場合、新しい環境でランダムに初期化されたウェイトを持つダミーモデルを作成し、それらのウェイトを🤗 Transformersバージョンのモデルと比較する方が良いかもしれません。 - 元のリポジトリでフォワードパスを呼び出す最も簡単な方法を使用していることを確認してください。理想的には、元のリポジトリで**単一のフォワードパス**を呼び出す関数を見つけたいです。これは通常「predict」、「evaluate」、「forward」、「__call__」と呼ばれます。複数回「forward」を呼び出す関数をデバッグしたくありません。例:テキストを生成するために「autoregressive_sample」、「generate」と呼ばれる関数。 - トークナイゼーションとモデルの「フォワード」パスを分離しようとしてください。元のリポジトリが入力文字列を入力する必要がある例を示す場合、フォワードコール内で文字列入力が入力IDに変更される場所を特定し、このポイントから開始します。これは、スクリプトを自分で書くか、入力文字列ではなく入力IDを直接入力できるように元のコードを変更する必要があるかもしれません。 - デバッグセットアップ内のモデルがトレーニングモードではないことを確認してください。トレーニングモードでは、モデル内の複数のドロップアウトレイヤーのためにランダムな出力が生成されることがあります。デバッグ環境のフォワードパスが**決定論的**であることを確認し、ドロップアウトレイヤーが使用されないようにします。または、新しい実装が同じフレームワーク内にある場合、*transformers.utils.set_seed*を使用してください。 以下のセクションでは、*brand_new_bert*についてこれを具体的にどのように行うかについての詳細/ヒントを提供します。 ### 5.-14. Port BrandNewBert to 🤗 Transformers 次に、ついに新しいコードを🤗 Transformersに追加できます。🤗 Transformersのフォークのクローンに移動してください: ```bash cd transformers ``` 特別なケースとして、既存のモデルと完全に一致するアーキテクチャのモデルを追加する場合、 [このセクション](#write-a-conversion-script)で説明されているように、変換スクリプトを追加するだけで済みます。 この場合、既存のモデルの完全なモデルアーキテクチャを再利用できます。 それ以外の場合、新しいモデルの生成を開始します。ここで2つの選択肢があります: - `transformers-cli add-new-model-like`を使用して既存のモデルのような新しいモデルを追加します - `transformers-cli add-new-model`を使用して、テンプレートから新しいモデルを追加します(モデルのタイプに応じてBERTまたはBartのように見えます) どちらの場合でも、モデルの基本情報を入力するための質問事項が表示されます。 2番目のコマンドを実行するには、`cookiecutter`をインストールする必要があります。 詳細については[こちら](https://github.com/huggingface/transformers/tree/main/templates/adding_a_new_model)をご覧ください。 **主要な huggingface/transformers リポジトリでプルリクエストを開く** 自動生成されたコードを適応し始める前に、🤗 Transformers に「作業中(WIP)」プルリクエストを開くタイミングです。 例:「[WIP] *brand_new_bert* を追加」などです。 これにより、ユーザーと Hugging Face チームが🤗 Transformers にモデルを統合する作業を並行して行うことができます。 以下の手順を実行してください: 1. メインブランチから分かりやすい名前のブランチを作成します。 ```bash git checkout -b add_brand_new_bert ``` 2. 自動生成されたコードをコミットしてください: ```bash git add . git commit ``` 3. 現在の main ブランチにフェッチしてリベース ```bash git fetch upstream git rebase upstream/main ``` 4. 変更をあなたのアカウントにプッシュするには、次のコマンドを使用します: ```bash git push -u origin a-descriptive-name-for-my-changes ``` 5. 満足したら、GitHub上のフォークのウェブページに移動します。[プルリクエスト]をクリックします。将来の変更に備えて、Hugging Face チームのメンバーのGitHubハンドルをレビュアーとして追加してください。 6. 
GitHubのプルリクエストウェブページの右側にある「ドラフトに変換」をクリックして、PRをドラフトに変更します。 以下では、進捗があった場合は常に作業をコミットし、プッシュしてプルリクエストに表示されるようにしてください。さらに、定期的にメインからの最新の変更を取り込むために、次のように行うことを忘れないでください: ```bash git fetch upstream git merge upstream/main ``` 一般的に、モデルや実装に関する質問はPull Request (PR) で行い、PR内で議論し、解決します。 これにより、Hugging Face チームは新しいコードをコミットする際や質問がある場合に常に通知を受けることができます。 質問や問題が解決された際に、問題や質問が理解されやすいように、Hugging Face チームにコードを指摘することが非常に役立ちます。 このためには、「Files changed」タブに移動してすべての変更を表示し、質問したい行に移動して「+」シンボルをクリックしてコメントを追加します。 質問や問題が解決された場合は、作成されたコメントの「Resolve」ボタンをクリックできます。 同様に、Hugging Face チームはコードをレビューする際にコメントを開きます。 PR上でのほとんどの質問はGitHub上で行うことをお勧めします。 一般的な質問に関しては、公にはあまり役立たない質問については、SlackやメールでHugging Face チームに連絡することもできます。 **5. 生成されたモデルコードを"brand_new_bert"に適応させる** 最初に、モデル自体に焦点を当て、トークナイザには気にしないでください。 関連するコードは、生成されたファイル`src/transformers/models/brand_new_bert/modeling_brand_new_bert.py`および`src/transformers/models/brand_new_bert/configuration_brand_new_bert.py`で見つかるはずです。 さて、ついにコーディングを始めることができます :smile:。 `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py`にある生成されたコードは、エンコーダーのみのモデルであればBERTと同じアーキテクチャを持っているか、エンコーダー-デコーダーモデルであればBARTと同じアーキテクチャを持っているはずです。 この段階では、モデルの理論的な側面について学んだことを思い出すべきです。つまり、「このモデルはBERTまたはBARTとどのように異なるのか?」ということです。 これらの変更を実装しますが、これは通常、セルフアテンションレイヤー、正規化レイヤーの順序などを変更することを意味します。 再び、あなたのモデルがどのように実装されるべきかをより良く理解するために、Transformers内に既存のモデルの類似アーキテクチャを見ることが役立つことがあります。 この時点では、コードが完全に正確またはクリーンである必要はありません。 むしろ、まずは必要なコードの最初の*クリーンでない*コピー&ペーストバージョンを `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py`に追加し、必要なコードがすべて追加されていると感じるまで改善/修正を反復的に行うことがお勧めです。 私たちの経験から、必要なコードの最初のバージョンを迅速に追加し、次のセクションで説明する変換スクリプトを使用してコードを繰り返し改善/修正する方が効率的であることが多いです。 この時点で動作する必要があるのは、🤗 Transformersの"brand_new_bert"の実装をインスタンス化できることだけです。つまり、以下のコマンドが機能する必要があります: ```python from transformers import BrandNewBertModel, BrandNewBertConfig model = BrandNewBertModel(BrandNewBertConfig()) ``` 上記のコマンドは、`BrandNewBertConfig()` で定義されたデフォルトパラメータに従ってモデルを作成し、 すべてのコンポーネントの `init()` メソッドが正常に動作することを確認します。 すべてのランダムな初期化は、`BrandnewBertPreTrainedModel` クラスの `_init_weights` メソッドで行う必要があります。 このメソッドは、設定変数に依存するすべてのリーフモジュールを初期化する必要があります。以下は、BERT の `_init_weights` メソッドの例です: ```py def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) ``` 特定のモジュールに特別な初期化が必要な場合、カスタムスキームをさらに持つことができます。たとえば、 `Wav2Vec2ForPreTraining`では、最後の2つの線形層には通常のPyTorchの`nn.Linear`の初期化が必要ですが、 他のすべての層は上記のような初期化を使用する必要があります。これは以下のようにコーディングされています: ```py def _init_weights(self, module): """Initialize the weights""" if isinstance(module, Wav2Vec2ForPreTraining): module.project_hid.reset_parameters() module.project_q.reset_parameters() module.project_hid._is_hf_initialized = True module.project_q._is_hf_initialized = True elif isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() ``` `_is_hf_initialized`フラグは、サブモジュールを一度だけ初期化することを確実にするために内部で使用されます。 `module.project_q`と`module.project_hid`のためにそれを`True`に設定することで、 カスタム初期化が後で上書きされないようにし、`_init_weights`関数がそれらに適用されないようにします。 **6. 
変換スクリプトを書く** 次に、*brand_new_bert* の元のリポジトリでデバッグに使用したチェックポイントを、新しく作成した 🤗 Transformers 実装の *brand_new_bert* と互換性のあるチェックポイントに変換できる変換スクリプトを書く必要があります。 変換スクリプトをゼロから書くことはお勧めされませんが、代わりに 🤗 Transformers で既に存在する類似のモデルを同じフレームワークで変換したスクリプトを調べることが良いでしょう。 通常、既存の変換スクリプトをコピーして、自分のユースケースにわずかに適応させることで十分です。 Hugging Face チームに既存のモデルに類似した変換スクリプトを教えてもらうことも躊躇しないでください。 - TensorFlowからPyTorchにモデルを移植している場合、良い出発点はBERTの変換スクリプトかもしれません [here](https://github.com/huggingface/transformers/blob/7acfa95afb8194f8f9c1f4d2c6028224dbed35a2/src/transformers/models/bert/modeling_bert.py#L91) - PyTorchからPyTorchにモデルを移植している場合、良い出発点はBARTの変換スクリプトかもしれません [here](https://github.com/huggingface/transformers/blob/main/src/transformers/models/bart/convert_bart_original_pytorch_checkpoint_to_pytorch.py) 以下では、PyTorchモデルが層の重みをどのように保存し、層の名前を定義するかについて簡単に説明します。 PyTorchでは、層の名前は層に与えるクラス属性の名前によって定義されます。 PyTorchで `SimpleModel` というダミーモデルを定義しましょう: ```python from torch import nn class SimpleModel(nn.Module): def __init__(self): super().__init__() self.dense = nn.Linear(10, 10) self.intermediate = nn.Linear(10, 10) self.layer_norm = nn.LayerNorm(10) ``` これで、このモデル定義のインスタンスを作成し、`dense`、`intermediate`、`layer_norm`のすべての重みをランダムな重みで埋めたモデルを作成できます。モデルのアーキテクチャを確認するために、モデルを印刷してみましょう。 ```python model = SimpleModel() print(model) ``` これは以下を出力します: ``` SimpleModel( (dense): Linear(in_features=10, out_features=10, bias=True) (intermediate): Linear(in_features=10, out_features=10, bias=True) (layer_norm): LayerNorm((10,), eps=1e-05, elementwise_affine=True) ) ``` 層の名前はPyTorchのクラス属性の名前によって定義されています。特定の層の重み値を出力することができます: ```python print(model.dense.weight.data) ``` ランダムに初期化された重みを確認するために ``` tensor([[-0.0818, 0.2207, -0.0749, -0.0030, 0.0045, -0.1569, -0.1598, 0.0212, -0.2077, 0.2157], [ 0.1044, 0.0201, 0.0990, 0.2482, 0.3116, 0.2509, 0.2866, -0.2190, 0.2166, -0.0212], [-0.2000, 0.1107, -0.1999, -0.3119, 0.1559, 0.0993, 0.1776, -0.1950, -0.1023, -0.0447], [-0.0888, -0.1092, 0.2281, 0.0336, 0.1817, -0.0115, 0.2096, 0.1415, -0.1876, -0.2467], [ 0.2208, -0.2352, -0.1426, -0.2636, -0.2889, -0.2061, -0.2849, -0.0465, 0.2577, 0.0402], [ 0.1502, 0.2465, 0.2566, 0.0693, 0.2352, -0.0530, 0.1859, -0.0604, 0.2132, 0.1680], [ 0.1733, -0.2407, -0.1721, 0.1484, 0.0358, -0.0633, -0.0721, -0.0090, 0.2707, -0.2509], [-0.1173, 0.1561, 0.2945, 0.0595, -0.1996, 0.2988, -0.0802, 0.0407, 0.1829, -0.1568], [-0.1164, -0.2228, -0.0403, 0.0428, 0.1339, 0.0047, 0.1967, 0.2923, 0.0333, -0.0536], [-0.1492, -0.1616, 0.1057, 0.1950, -0.2807, -0.2710, -0.1586, 0.0739, 0.2220, 0.2358]]). ``` スクリプト内の変換スクリプトでは、ランダムに初期化された重みを、対応するチェックポイント内の正確な重みで埋める必要があります。例えば、以下のように翻訳します: ```python # retrieve matching layer weights, e.g. 
by # recursive algorithm layer_name = "dense" pretrained_weight = array_of_dense_layer model_pointer = getattr(model, "dense") model_pointer.weight.data = torch.from_numpy(pretrained_weight) ``` PyTorchモデルの各ランダム初期化された重みと対応する事前学習済みチェックポイントの重みが **形状と名前の両方**で正確に一致することを確認する必要があります。 これを行うために、形状に対するassertステートメントを追加し、チェックポイントの重みの名前を出力することが **必要不可欠**です。例えば、次のようなステートメントを追加する必要があります: ```python assert ( model_pointer.weight.shape == pretrained_weight.shape ), f"Pointer shape of random weight {model_pointer.shape} and array shape of checkpoint weight {pretrained_weight.shape} mismatched" ``` また、両方の重みの名前を印刷して、一致していることを確認する必要があります。例えば、次のようにします: ```python logger.info(f"Initialize PyTorch weight {layer_name} from {pretrained_weight.name}") ``` もし形状または名前のいずれかが一致しない場合、おそらく誤って🤗 Transformersの実装に初期化されたレイヤーに間違ったチェックポイントの重みを割り当ててしまった可能性があります。 誤った形状は、おそらく`BrandNewBertConfig()`での設定パラメーターが、変換したいチェックポイントで使用されたものと正確に一致しないためです。 ただし、PyTorchのレイヤーの実装によっては、重みを事前に転置する必要がある場合もあります。 最後に、**すべて**の必要な重みが初期化されていることを確認し、初期化に使用されなかったすべてのチェックポイントの重みを表示して、モデルが正しく変換されていることを確認してください。 変換トライアルが誤った形状ステートメントまたは誤った名前割り当てで失敗するのは完全に正常です。 これはおそらく、`BrandNewBertConfig()`で誤ったパラメーターを使用したか、🤗 Transformersの実装に誤ったアーキテクチャがあるか、🤗 Transformersの実装の1つのコンポーネントの`init()`関数にバグがあるか、チェックポイントの重みの1つを転置する必要があるためです。 このステップは、以前のステップと繰り返すべきです。すべてのチェックポイントの重みが正しく🤗 Transformersモデルに読み込まれるまで繰り返すべきです。 🤗 Transformers実装に正しくチェックポイントを読み込んだ後、選択したフォルダーにモデルを保存できます `/path/to/converted/checkpoint/folder`。このフォルダには`pytorch_model.bin`ファイルと`config.json`ファイルの両方が含まれるはずです。 ```python model.save_pretrained("/path/to/converted/checkpoint/folder") ``` **7. 順伝播(forward pass)の実装** 🤗 Transformers実装で事前学習済みの重みを正しく読み込んだ後、順伝播が正しく実装されていることを確認する必要があります。[元のリポジトリを理解する](#3-4-run-a-pretrained-checkpoint-using-the-original-repository)で、元のリポジトリを使用してモデルの順伝播を実行するスクリプトをすでに作成しました。今度は、元のリポジトリの代わりに🤗 Transformers実装を使用して類似のスクリプトを作成する必要があります。以下のようになります: ```python model = BrandNewBertModel.from_pretrained("/path/to/converted/checkpoint/folder") input_ids = [0, 4, 4, 3, 2, 4, 1, 7, 19] output = model(input_ids).last_hidden_states ``` 🤗 Transformersの実装と元のモデルの実装が最初の実行で完全に同じ出力を提供しないか、 フォワードパスでエラーが発生する可能性が非常に高いです。失望しないでください - これは予想されていることです! 
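両実装の出力を比較する際には、例えば次のような簡単なスニペットが参考になるかもしれません(あくまで一例で、変数名 `original_output` と `output` は仮のものです。どちらも PyTorch テンソルであることを前提としています):

```python
import torch

# original_output: 元のリポジトリで得た出力 / output: 🤗 Transformers 実装の出力(いずれも仮の変数名)
print("shapes:", original_output.shape, output.shape)

# 最大絶対誤差を確認する
max_diff = (original_output - output).abs().max()
print("max absolute difference:", max_diff)

# 最終的には 1e-3 の許容誤差で一致している必要があります
print(torch.allclose(original_output, output, atol=1e-3))
```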
まず、フォワードパスがエラーをスローしないことを確認する必要があります。 間違った次元が使用され、*次元の不一致*エラーや、誤ったデータ型オブジェクトが使用されることがよくあります。 例えば、`torch.long`ではなく`torch.float32`が使用されます。特定のエラーを解決できない場合は、 Hugging Faceチームに助けを求めることを躊躇しないでください。 🤗 Transformers実装が正しく機能することを確認する最終的な部分は、出力が`1e-3`の精度で同等であることを確認することです。 まず、出力の形状が同一であること、つまりスクリプトの🤗 Transformers実装と元の実装の両方で`outputs.shape`が同じ値を生成する必要があります。 次に、出力値が同一であることを確認する必要があります。 これは新しいモデルを追加する際の最も難しい部分の1つです。 出力が同一でない理由の一般的な間違いは以下の通りです。 - 一部のレイヤーが追加されていない、つまり*活性化*レイヤーが追加されていないか、リザバル接続が忘れられている - 単語埋め込み行列が結ばれていない - オリジナルの実装がオフセットを使用しているため、誤った位置埋め込みが使用されている - フォワードパス中にドロップアウトが適用されています。これを修正するには、*model.trainingがFalse*であることを確認し、フォワードパス中に誤ってドロップアウトレイヤーがアクティブ化されないようにします。 *つまり* [PyTorchのfunctional dropout](https://pytorch.org/docs/stable/nn.functional.html?highlight=dropout#torch.nn.functional.dropout)に*model.training*を渡します。 問題を修正する最良の方法は、通常、元の実装と🤗 Transformers実装のフォワードパスを並べて表示し、違いがあるかどうかを確認することです。 理想的には、フォワードパスの両方の実装の中間出力をデバッグ/プリントアウトして、🤗 Transformers実装が元の実装と異なる出力を示すネットワーク内の正確な位置を見つけることができます。 最初に、両方のスクリプトのハードコーディングされた`input_ids`が同一であることを確認します。 次に、`input_ids`の最初の変換(通常、単語埋め込み)の出力が同一であることを確認します。 その後、ネットワークの最後のレイヤーまで作業を進めます。 いずれかの時点で、2つの実装間で違いがあることに気付くはずで、それにより🤗 Transformers実装のバグの場所が特定されます。 経験上、元の実装と🤗 Transformers実装のフォワードパスの同じ位置に多くのプリントステートメントを追加し、 中間プレゼンテーションで同じ値を示すプリントステートメントを段階的に削除するのがシンプルかつ効果的な方法です。 両方の実装が同じ出力を生成することに自信を持っている場合、`torch.allclose(original_output, output, atol=1e-3)`を使用して出力を確認すると、最も難しい部分が完了します! おめでとうございます - 完了する作業は簡単なものになるはずです 😊。 **8. 必要なすべてのモデルテストを追加** この時点で、新しいモデルが正常に追加されました。 ただし、モデルがまだ必要な設計に完全に準拠していない可能性が非常に高いです。 🤗 Transformersと完全に互換性があることを確認するために、すべての一般的なテストがパスする必要があります。 Cookiecutterはおそらくモデル用のテストファイルを自動的に追加しているはずで、おそらく同じディレクトリに`tests/models/brand_new_bert/test_modeling_brand_new_bert.py`として存在します。 このテストファイルを実行して、すべての一般的なテストがパスすることを確認してください: ```bash pytest tests/models/brand_new_bert/test_modeling_brand_new_bert.py ``` すべての一般的なテストを修正したら、今度は実行したすべての素晴らしい作業が適切にテストされていることを確認することが非常に重要です。これにより、 - a) コミュニティは*brand_new_bert*の特定のテストを見ることで、あなたの作業を簡単に理解できます。 - b) モデルへの将来の変更がモデルの重要な機能を壊さないようにすることができます。 まず、統合テストを追加する必要があります。これらの統合テストは、基本的にはデバッグスクリプトと同じことを行います。これらのモデルテストのテンプレートはCookiecutterによって既に追加されており、「BrandNewBertModelIntegrationTests」と呼ばれています。このテストを記入するだけです。これらのテストが合格していることを確認するには、次のコマンドを実行します。 ```bash RUN_SLOW=1 pytest -sv tests/models/brand_new_bert/test_modeling_brand_new_bert.py::BrandNewBertModelIntegrationTests ``` <Tip> Windowsを使用している場合、`RUN_SLOW=1`を`SET RUN_SLOW=1`に置き換えてください。 </Tip> 次に、*brand_new_bert*に特有のすべての特徴は、別個のテスト内で追加されるべきです。 `BrandNewBertModelTester`/`BrandNewBertModelTest`の下に。この部分はよく忘れられますが、2つの点で非常に役立ちます: - モデルの追加中に獲得した知識をコミュニティに伝え、*brand_new_bert*の特別な機能がどのように動作するかを示すことによって、知識の共有を支援します。 - 将来の貢献者は、これらの特別なテストを実行することでモデルへの変更を迅速にテストできます。 **9. トークナイザの実装** 次に、*brand_new_bert*のトークナイザを追加する必要があります。通常、トークナイザは🤗 Transformersの既存のトークナイザと同等か非常に似ています。 トークナイザが正しく動作することを確認するためには、まず、元のリポジトリ内で文字列を入力し、`input_ids`を返すスクリプトを作成することをお勧めします。 このスクリプトは、次のように見えるかもしれません(疑似コードで示します): ```python input_str = "This is a long example input string containing special characters .$?-, numbers 2872 234 12 and words." model = BrandNewBertModel.load_pretrained_checkpoint("/path/to/checkpoint/") input_ids = model.tokenize(input_str) ``` オリジナルのリポジトリを詳しく調査し、正しいトークナイザの関数を見つける必要があるかもしれません。 または、オリジナルのリポジトリのクローンを変更して、`input_ids`だけを出力するようにする必要があるかもしれません。 オリジナルのリポジトリを使用した機能的なトークナイゼーションスクリプトを作成した後、 🤗 Transformers向けの類似したスクリプトを作成する必要があります。 以下のように見えるべきです: ```python from transformers import BrandNewBertTokenizer input_str = "This is a long example input string containing special characters .$?-, numbers 2872 234 12 and words." 
tokenizer = BrandNewBertTokenizer.from_pretrained("/path/to/tokenizer/folder/") input_ids = tokenizer(input_str).input_ids ``` `input_ids`が同じ値を生成した場合、最終ステップとしてトークナイザのテストファイルも追加するべきです。 *brand_new_bert*のモデルングテストファイルと同様に、*brand_new_bert*のトークナイズテストファイルには、いくつかのハードコードされた統合テストが含まれるべきです。 **10. エンドツーエンド統合テストの実行** トークナイザを追加した後、`🤗 Transformers`内の`tests/models/brand_new_bert/test_modeling_brand_new_bert.py`に モデルとトークナイザの両方を使用するいくつかのエンドツーエンド統合テストも追加する必要があります。 このようなテストは、🤗 Transformersの実装が期待どおりに機能することを示すべきです。 意味のあるテキスト対テキストのサンプルが含まれます。有用なテキスト対テキストのサンプルには、ソースからターゲットへの翻訳ペア、記事から要約へのペア、質問から回答へのペアなどが含まれます。 ポートされたチェックポイントがダウンストリームタスクでファインチューニングされていない場合、モデルのテストに依存するだけで十分です。 モデルが完全に機能していることを確認するために、すべてのテストをGPU上で実行することもお勧めします。 モデルの内部テンソルに`.to(self.device)`ステートメントを追加するのを忘れる可能性があるため、そのようなテストではエラーが表示されることがあります。 GPUにアクセスできない場合、Hugging Faceチームが代わりにこれらのテストを実行できます。 **11. ドキュメントの追加** これで、*brand_new_bert*の必要なすべての機能が追加されました - ほぼ完了です!残りの追加すべきことは、良いドキュメントとドキュメントページです。 Cookiecutterが`docs/source/model_doc/brand_new_bert.md`というテンプレートファイルを追加しているはずで、これを記入する必要があります。 モデルのユーザーは通常、モデルを使用する前にまずこのページを見ます。したがって、ドキュメンテーションは理解しやすく簡潔である必要があります。 モデルの使用方法を示すためにいくつかの*Tips*を追加することはコミュニティにとって非常に役立ちます。ドキュメンテーションに関しては、Hugging Faceチームに問い合わせることをためらわないでください。 次に、`src/transformers/models/brand_new_bert/modeling_brand_new_bert.py`に追加されたドキュメンテーション文字列が正しいこと、およびすべての必要な入力および出力を含んでいることを確認してください。 ドキュメンテーションの書き方とドキュメンテーション文字列のフォーマットについて詳細なガイドが[こちら](writing-documentation)にあります。 ドキュメンテーションは通常、コミュニティとモデルの最初の接触点であるため、コードと同じくらい注意深く扱うべきであることを常に念頭に置いてください。 **コードのリファクタリング** 素晴らしい、これで*brand_new_bert*に必要なすべてのコードが追加されました。 この時点で、次のようなポテンシャルなコードスタイルの誤りを訂正するために以下を実行する必要があります: ```bash make style ``` あなたのコーディングスタイルが品質チェックをパスすることを確認してください: ```bash make quality ``` 🤗 Transformersの非常に厳格なデザインテストには、まだ合格していない可能性があるいくつかの他のテストが存在するかもしれません。 これは、ドキュメント文字列に情報が不足しているか、名前が間違っていることが原因であることが多いです。Hugging Faceチームは、ここで詰まっている場合には必ず助けてくれるでしょう。 最後に、コードが正しく機能することを確認した後、コードをリファクタリングするのは常に良いアイデアです。 すべてのテストがパスした今、追加したコードを再度確認してリファクタリングを行うのは良いタイミングです。 これでコーディングの部分は完了しました、おめでとうございます! 🎉 あなたは素晴らしいです! 😎 **12. モデルをモデルハブにアップロード** 最後のパートでは、すべてのチェックポイントをモデルハブに変換してアップロードし、各アップロードしたモデルチェックポイントにモデルカードを追加する必要があります。 モデルハブの機能について詳しくは、[Model sharing and uploading Page](model_sharing)を読んで理解できます。 ここでは、*brand_new_bert*の著者組織の下にモデルをアップロードできるように必要なアクセス権を取得するために、Hugging Faceチームと協力する必要があります。 `transformers`のすべてのモデルに存在する`push_to_hub`メソッドは、チェックポイントをハブにプッシュする迅速かつ効率的な方法です。 以下に、少しのコードスニペットを示します: ```python brand_new_bert.push_to_hub("brand_new_bert") # Uncomment the following line to push to an organization. # brand_new_bert.push_to_hub("<organization>/brand_new_bert") ``` 各チェックポイントに適切なモデルカードを作成する価値があります。モデルカードは、この特定のチェックポイントの特性をハイライトするべきです。例えば、このチェックポイントはどのデータセットで事前学習/ファインチューニングされたか、どのような下流タスクでモデルを使用すべきかを示すべきです。また、モデルの正しい使用方法に関するコードも含めるべきです。 **13.(オプション)ノートブックの追加** *brand_new_bert*を推論または下流タスクのファインチューニングにどのように詳細に使用できるかを示すノートブックを追加することは非常に役立ちます。これはあなたのPRをマージするために必須ではありませんが、コミュニティにとって非常に有用です。 **14. 完成したPRの提出** プログラミングが完了したら、最後のステップに移動し、PRをメインブランチにマージしましょう。通常、Hugging Faceチームはこの時点で既にあなたをサポートしているはずですが、PRに良い説明を追加し、コードにコメントを追加して、レビュアーに特定の設計の選択肢を指摘したい場合はコメントを追加することも価値があります。 ### Share your work!! さあ、コミュニティからあなたの作業に対する評価を得る時が来ました!モデルの追加を完了することは、TransformersおよびNLPコミュニティにとって重要な貢献です。あなたのコードとポートされた事前学習済みモデルは、何百人、何千人という開発者や研究者によって確実に使用されるでしょう。あなたの仕事に誇りを持ち、コミュニティとあなたの成果を共有しましょう。 **あなたはコミュニティの誰でも簡単にアクセスできる別のモデルを作成しました! 🤯**
transformers/docs/source/ja/add_new_model.md/0
{ "file_path": "transformers/docs/source/ja/add_new_model.md", "repo_id": "transformers", "token_count": 28026 }
269
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # DeepSpeed Integration [DeepSpeed](https://github.com/microsoft/DeepSpeed) は、[ZeRO 論文](https://arxiv.org/abs/1910.02054) で説明されているすべてを実装します。現在、次のものを完全にサポートしています。 1. オプティマイザーの状態分割 (ZeRO ステージ 1) 2. 勾配分割 (ZeRO ステージ 2) 3. パラメーターの分割 (ZeRO ステージ 3) 4. カスタム混合精度トレーニング処理 5. 一連の高速 CUDA 拡張ベースのオプティマイザー 6. CPU および NVMe への ZeRO オフロード ZeRO-Offload には独自の専用ペーパーがあります: [ZeRO-Offload: Democratizing Billion-Scale Model Training](https://arxiv.org/abs/2101.06840)。 NVMe サポートについては、論文 [ZeRO-Infinity: Breaking the GPU Memory Wall for Extreme Scale Deep Learning](https://arxiv.org/abs/2104.07857)。 DeepSpeed ZeRO-2 は、その機能が推論には役に立たないため、主にトレーニングのみに使用されます。 DeepSpeed ZeRO-3 は、巨大なモデルを複数の GPU にロードできるため、推論にも使用できます。 単一の GPU では不可能です。 🤗 Transformers は、2 つのオプションを介して [DeepSpeed](https://github.com/microsoft/DeepSpeed) を統合します。 1. [`Trainer`] によるコア DeepSpeed 機能の統合。何でもやってくれるタイプです 統合の場合 - カスタム構成ファイルを指定するか、テンプレートを使用するだけで、他に何もする必要はありません。たいていの このドキュメントではこの機能に焦点を当てています。 2. [`Trainer`] を使用せず、DeepSpeed を統合した独自のトレーナーを使用したい場合 `from_pretrained` や `from_config` などのコア機能には、重要な機能の統合が含まれています。 ZeRO ステージ 3 以降の `zero.Init`などの DeepSpeed の部分。この機能を活用するには、次のドキュメントをお読みください。 [非トレーナー DeepSpeed 統合](#nontrainer-deepspeed-integration)。 統合されているもの: トレーニング: 1. DeepSpeed ZeRO トレーニングは、ZeRO-Infinity (CPU および NVME オフロード) を使用して完全な ZeRO ステージ 1、2、および 3 をサポートします。 推論: 1. DeepSpeed ZeRO Inference は、ZeRO-Infinity による ZeRO ステージ 3 をサポートします。トレーニングと同じ ZeRO プロトコルを使用しますが、 オプティマイザと lr スケジューラは使用せず、ステージ 3 のみが関連します。詳細については、以下を参照してください。 [ゼロ推論](#zero-inference)。 DeepSpeed Inference もあります。これは、Tensor Parallelism の代わりに Tensor Parallelism を使用するまったく異なるテクノロジーです。 ZeRO (近日公開)。 <a id='deepspeed-trainer-integration'></a> ## Trainer Deepspeed Integration <a id='deepspeed-installation'></a> ### Installation pypi 経由でライブラリをインストールします。 ```bash pip install deepspeed ``` または`tansformers`, `extras`経由: ```bash pip install transformers[deepspeed] ``` または、[DeepSpeed の GitHub ページ](https://github.com/microsoft/deepspeed#installation) で詳細を確認してください。 [高度なインストール](https://www.deepspeed.ai/tutorials/advanced-install/)。 それでもビルドに苦労する場合は、まず [CUDA 拡張機能のインストール ノート](trainer#cuda-extension-installation-notes) を必ず読んでください。 拡張機能を事前ビルドせず、実行時に拡張機能がビルドされることに依存しており、上記の解決策をすべて試した場合 それが役に立たなかった場合、次に試すべきことは、モジュールをインストールする前にモジュールを事前にビルドすることです。 DeepSpeed のローカル ビルドを作成するには: ```bash git clone https://github.com/microsoft/DeepSpeed/ cd DeepSpeed rm -rf build TORCH_CUDA_ARCH_LIST="8.6" DS_BUILD_CPU_ADAM=1 DS_BUILD_UTILS=1 pip install . 
\ --global-option="build_ext" --global-option="-j8" --no-cache -v \ --disable-pip-version-check 2>&1 | tee build.log ``` NVMe オフロードを使用する場合は、上記の手順に`DS_BUILD_AIO=1`を含める必要があります (また、 *libaio-dev* システム全体にインストールします)。 `TORCH_CUDA_ARCH_LIST` を編集して、使用する GPU カードのアーキテクチャのコードを挿入します。すべてを仮定すると あなたのカードは同じで、次の方法でアーチを取得できます。 ```bash CUDA_VISIBLE_DEVICES=0 python -c "import torch; print(torch.cuda.get_device_capability())" ``` したがって、`8, 6`を取得した場合は、`TORCH_CUDA_ARCH_LIST="8.6"`を使用します。複数の異なるカードをお持ちの場合は、すべてをリストすることができます それらのうち、`TORCH_CUDA_ARCH_LIST="6.1;8.6"`が好きです 複数のマシンで同じセットアップを使用する必要がある場合は、バイナリ ホイールを作成します。 ```bash git clone https://github.com/microsoft/DeepSpeed/ cd DeepSpeed rm -rf build TORCH_CUDA_ARCH_LIST="8.6" DS_BUILD_CPU_ADAM=1 DS_BUILD_UTILS=1 \ python setup.py build_ext -j8 bdist_wheel ``` `dist/deepspeed-0.3.13+8cd046f-cp38-cp38-linux_x86_64.whl`のようなものが生成されるので、これをインストールできます `pip install deepspeed-0.3.13+8cd046f-cp38-cp38-linux_x86_64.whl`としてローカルまたは他のマシンにインストールします。 繰り返しますが、`TORCH_CUDA_ARCH_LIST`をターゲット アーキテクチャに合わせて調整することを忘れないでください。 NVIDIA GPU の完全なリストと、それに対応する **コンピューティング機能** (この記事の Arch と同じ) を見つけることができます。 コンテキスト) [ここ](https://developer.nvidia.com/cuda-gpus)。 以下を使用して、pytorch が構築されたアーチを確認できます。 ```bash python -c "import torch; print(torch.cuda.get_arch_list())" ``` ここでは、インストールされている GPU の 1 つのアーチを見つける方法を説明します。たとえば、GPU 0 の場合: ```bash CUDA_VISIBLE_DEVICES=0 python -c "import torch; \ print(torch.cuda.get_device_properties(torch.device('cuda')))" ``` 出力が次の場合: ```bash _CudaDeviceProperties(name='GeForce RTX 3090', major=8, minor=6, total_memory=24268MB, multi_processor_count=82) ``` そうすれば、このカードのアーチが`8.6`であることがわかります。 `TORCH_CUDA_ARCH_LIST` を完全に省略することもできます。そうすれば、ビルド プログラムが自動的にクエリを実行します。 ビルドが行われる GPU のアーキテクチャ。これは、ターゲット マシンの GPU と一致する場合もあれば、一致しない場合もあります。 目的のアーチを明示的に指定することをお勧めします。 提案されたことをすべて試してもまだビルドの問題が発生する場合は、GitHub の問題に進んでください。 [ディープスピード](https://github.com/microsoft/DeepSpeed/issues)、 <a id='deepspeed-multi-gpu'></a> ### Deployment with multiple GPUs DeepSpeed 統合をデプロイするには、[`Trainer`] コマンド ライン引数を調整して新しい引数 `--deepspeed ds_config.json` を含めます。ここで、`ds_config.json` は DeepSpeed 構成ファイルです。 [こちら](https://www.deepspeed.ai/docs/config-json/)に記載されています。ファイル名はあなた次第です。 DeepSpeed の`add_config_arguments`ユーティリティを使用して、必要なコマンド ライン引数をコードに追加することをお勧めします。 詳細については、[DeepSpeed の引数解析](https://deepspeed.readthedocs.io/en/latest/initialize.html#argument-parsing) ドキュメントを参照してください。 ここで選択したランチャーを使用できます。 pytorch ランチャーを引き続き使用できます。 ```bash torch.distributed.run --nproc_per_node=2 your_program.py <normal cl args> --deepspeed ds_config.json ``` または、`deepspeed`によって提供されるランチャーを使用します。 ```bash deepspeed --num_gpus=2 your_program.py <normal cl args> --deepspeed ds_config.json ``` ご覧のとおり、引数は同じではありませんが、ほとんどのニーズではどちらでも機能します。の さまざまなノードと GPU を構成する方法の詳細については、[こちら](https://www.deepspeed.ai/getting-started/#resource-configuration-multi-node) を参照してください。 `deepspeed`ランチャーを使用し、利用可能なすべての GPU を使用したい場合は、`--num_gpus`フラグを省略するだけです。 以下は、利用可能なすべての GPU をデプロイする DeepSpeed で`run_translation.py`を実行する例です。 ```bash deepspeed examples/pytorch/translation/run_translation.py \ --deepspeed tests/deepspeed/ds_config_zero3.json \ --model_name_or_path google-t5/t5-small --per_device_train_batch_size 1 \ --output_dir output_dir --overwrite_output_dir --fp16 \ --do_train --max_train_samples 500 --num_train_epochs 1 \ --dataset_name wmt16 --dataset_config "ro-en" \ --source_lang en --target_lang ro ``` DeepSpeed のドキュメントには、`--deepspeed --deepspeed_config ds_config.json`が表示される可能性が高いことに注意してください。 DeepSpeed 関連の引数が 2 つありますが、簡単にするためであり、処理すべき引数がすでに非常に多いためです。 この 2 つを 1 つの引数に結合しました。 実際の使用例については、この 
[投稿](https://github.com/huggingface/transformers/issues/8771#issuecomment-759248400) を参照してください。 <a id='deepspeed-one-gpu'></a> ### Deployment with one GPU 1 つの GPU で DeepSpeed をデプロイするには、[`Trainer`] コマンド ライン引数を次のように調整します。 ```bash deepspeed --num_gpus=1 examples/pytorch/translation/run_translation.py \ --deepspeed tests/deepspeed/ds_config_zero2.json \ --model_name_or_path google-t5/t5-small --per_device_train_batch_size 1 \ --output_dir output_dir --overwrite_output_dir --fp16 \ --do_train --max_train_samples 500 --num_train_epochs 1 \ --dataset_name wmt16 --dataset_config "ro-en" \ --source_lang en --target_lang ro ``` これは複数の GPU の場合とほぼ同じですが、ここでは、DeepSpeed に 1 つの GPU だけを使用するように明示的に指示します。 `--num_gpus=1`。デフォルトでは、DeepSpeed は指定されたノード上で認識できるすべての GPU をデプロイします。起動する GPU が 1 つだけの場合 の場合、この引数は必要ありません。次の [ドキュメント](https://www.deepspeed.ai/getting-started/#resource-configuration-multi-node) では、ランチャー オプションについて説明しています。 1 つの GPU だけで DeepSpeed を使用したいのはなぜですか? 1. 一部の計算とメモリをホストの CPU と RAM に委任できる ZeRO オフロード機能を備えているため、 モデルのニーズに合わせてより多くの GPU リソースを残しておきます。より大きなバッチ サイズ、または非常に大きなモデルのフィッティングを可能にする 普通は合わないでしょう。 2. スマートな GPU メモリ管理システムを提供し、メモリの断片化を最小限に抑えます。 より大きなモデルとデータ バッチ。 次に構成について詳しく説明しますが、単一の GPU で大幅な改善を実現するための鍵は次のとおりです。 DeepSpeed を使用するには、構成ファイルに少なくとも次の構成が必要です。 ```json { "zero_optimization": { "stage": 2, "offload_optimizer": { "device": "cpu", "pin_memory": true }, "allgather_partitions": true, "allgather_bucket_size": 2e8, "reduce_scatter": true, "reduce_bucket_size": 2e8, "overlap_comm": true, "contiguous_gradients": true } } ``` これにより、オプティマイザーのオフロードやその他の重要な機能が有効になります。バッファ サイズを試してみるとよいでしょう。 詳細については、以下のディスカッションを参照してください。 このタイプのデプロイメントの実際的な使用例については、この [投稿](https://github.com/huggingface/transformers/issues/8771#issuecomment-759176685) を参照してください。 このドキュメントで詳しく説明されているように、CPU および NVMe オフロードを備えた ZeRO-3 を試すこともできます。 ノート: - GPU 0 とは異なる特定の GPU で実行する必要がある場合、`CUDA_VISIBLE_DEVICES` を使用して制限することはできません。 利用可能な GPU の表示範囲。代わりに、次の構文を使用する必要があります。 ```bash deepspeed --include localhost:1 examples/pytorch/translation/run_translation.py ... 
``` この例では、DeepSpeed に GPU 1 (2 番目の GPU) を使用するように指示します。 <a id='deepspeed-multi-node'></a> ### 複数のノードを使用したデプロイメント このセクションの情報は DeepSpeed 統合に固有のものではなく、あらゆるマルチノード プログラムに適用できます。ただし、DeepSpeed は、SLURM 環境でない限り、他のランチャーよりも使いやすい`deepspeed`ランチャーを提供します。 このセクションでは、それぞれ 8 GPU を備えた 2 つのノードがあると仮定します。また、最初のノードには `ssh hostname1` を使用して、2 番目のノードには `ssh hostname2` を使用して接続できます。両方ともパスワードなしでローカルの ssh 経由で相互に接続できる必要があります。もちろん、これらのホスト (ノード) 名を、作業している実際のホスト名に変更する必要があります。 #### The torch.distributed.run launcher たとえば、`torch.distributed.run` を使用するには、次のようにします。 ```bash python -m torch.distributed.run --nproc_per_node=8 --nnode=2 --node_rank=0 --master_addr=hostname1 \ --master_port=9901 your_program.py <normal cl args> --deepspeed ds_config.json ``` 各ノードに SSH で接続し、それぞれのノードで同じコマンドを実行する必要があります。急ぐ必要はありません。ランチャーは両方のノードが同期するまで待機します。 詳細については、[torchrun](https://pytorch.org/docs/stable/elastic/run.html) を参照してください。ちなみに、これは pytorch の数バージョン前の`torch.distributed.launch`を置き換えたランチャーでもあります。 #### ディープスピード ランチャー 代わりに`deepspeed`ランチャーを使用するには、まず`hostfile`ファイルを作成する必要があります。 ``` hostname1 slots=8 hostname2 slots=8 ``` そして、次のように起動できます。 ```bash deepspeed --num_gpus 8 --num_nodes 2 --hostfile hostfile --master_addr hostname1 --master_port=9901 \ your_program.py <normal cl args> --deepspeed ds_config.json ``` `torch.distributed.run`ランチャーとは異なり、`deepspeed`は両方のノードでこのコマンドを自動的に起動します。 詳細については、[リソース構成 (マルチノード)](https://www.deepspeed.ai/getting-started/#resource-configuration-multi-node) を参照してください。 #### Launching in a SLURM environment SLURM 環境では、次のアプローチを使用できます。以下は、特定の SLURM 環境に適合させるために必要な slurm スクリプト `launch.slurm` です。 ```bash #SBATCH --job-name=test-nodes # name #SBATCH --nodes=2 # nodes #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! #SBATCH --cpus-per-task=10 # number of cores per tasks #SBATCH --gres=gpu:8 # number of gpus #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) #SBATCH --output=%x-%j.out # output file name export GPUS_PER_NODE=8 export MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) export MASTER_PORT=9901 srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ your_program.py <normal cl args> --deepspeed ds_config.json' ``` あとは実行をスケジュールするだけです。 ```bash sbatch launch.slurm ``` #### Use of Non-shared filesystem デフォルトでは、DeepSpeed はマルチノード環境が共有ストレージを使用することを想定しています。これが当てはまらず、各ノードがローカル ファイルシステムしか参照できない場合は、設定ファイルを調整して [`checkpoint`_section](https://www.deepspeed.ai/docs/config-json/#) を含める必要があります。チェックポイント オプション) を次の設定で指定します。 ```json { "checkpoint": { "use_node_local_storage": true } } ``` あるいは、[`Trainer`] の `--save_on_each_node` 引数を使用することもでき、上記の設定は自動的に追加されます。 <a id='deepspeed-notebook'></a> ### Deployment in Notebooks ノートブックのセルをスクリプトとして実行する場合の問題は、依存する通常の`deepspeed`ランチャーがないことです。 特定の設定では、それをエミュレートする必要があります。 GPU を 1 つだけ使用している場合、DeepSpeed を使用するためにノートブック内のトレーニング コードを調整する必要がある方法は次のとおりです。 ```python # DeepSpeed requires a distributed environment even when only one process is used. # This emulates a launcher in the notebook import os os.environ["MASTER_ADDR"] = "localhost" os.environ["MASTER_PORT"] = "9994" # modify if RuntimeError: Address already in use os.environ["RANK"] = "0" os.environ["LOCAL_RANK"] = "0" os.environ["WORLD_SIZE"] = "1" # Now proceed as normal, plus pass the deepspeed config file training_args = TrainingArguments(..., deepspeed="ds_config_zero3.json") trainer = Trainer(...) 
trainer.train() ``` 注: `...` は、関数に渡す通常の引数を表します。 複数の GPU を使用する場合、DeepSpeed が動作するにはマルチプロセス環境を使用する必要があります。つまり、あなたは持っています その目的でランチャーを使用することはできませんが、これは、提示された分散環境をエミュレートすることによっては実現できません。 このセクションの冒頭で。 現在のディレクトリのノートブックにその場で構成ファイルを作成したい場合は、専用の セルの内容: ```python no-style %%bash cat <<'EOT' > ds_config_zero3.json { "fp16": { "enabled": "auto", "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "optimizer": { "type": "AdamW", "params": { "lr": "auto", "betas": "auto", "eps": "auto", "weight_decay": "auto" } }, "scheduler": { "type": "WarmupLR", "params": { "warmup_min_lr": "auto", "warmup_max_lr": "auto", "warmup_num_steps": "auto" } }, "zero_optimization": { "stage": 3, "offload_optimizer": { "device": "cpu", "pin_memory": true }, "offload_param": { "device": "cpu", "pin_memory": true }, "overlap_comm": true, "contiguous_gradients": true, "sub_group_size": 1e9, "reduce_bucket_size": "auto", "stage3_prefetch_bucket_size": "auto", "stage3_param_persistence_threshold": "auto", "stage3_max_live_parameters": 1e9, "stage3_max_reuse_distance": 1e9, "stage3_gather_16bit_weights_on_model_save": true }, "gradient_accumulation_steps": "auto", "gradient_clipping": "auto", "steps_per_print": 2000, "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "wall_clock_breakdown": false } EOT ``` トレーニング スクリプトがノートブックのセルではなく通常のファイルにある場合は、次のようにして`deepspeed`を通常どおり起動できます。 細胞からのシェル。たとえば、`run_translation.py` を使用するには、次のように起動します。 ```python no-style !git clone https://github.com/huggingface/transformers !cd transformers; deepspeed examples/pytorch/translation/run_translation.py ... ``` または、`%%bash` マジックを使用すると、シェル プログラムを実行するための複数行のコードを記述することができます。 ```python no-style %%bash git clone https://github.com/huggingface/transformers cd transformers deepspeed examples/pytorch/translation/run_translation.py ... ``` そのような場合、このセクションの最初に示したコードは必要ありません。 注: `%%bash` マジックは優れていますが、現時点では出力をバッファリングするため、プロセスが終了するまでログは表示されません。 完了します。 <a id='deepspeed-config'></a> ### Configuration 設定ファイルで使用できる DeepSpeed 設定オプションの完全なガイドについては、次を参照してください。 [次のドキュメント](https://www.deepspeed.ai/docs/config-json/) にアクセスしてください。 さまざまな実際のニーズに対応する数十の DeepSpeed 構成例を [DeepSpeedExamples](https://github.com/microsoft/DeepSpeedExamples)で見つけることができます。 リポジトリ: ```bash git clone https://github.com/microsoft/DeepSpeedExamples cd DeepSpeedExamples find . -name '*json' ``` 上記のコードを続けて、Lamb オプティマイザーを構成しようとしているとします。したがって、次の中から検索できます `.json` ファイルの例: ```bash grep -i Lamb $(find . 
-name '*json') ``` さらにいくつかの例が [メイン リポジトリ](https://github.com/microsoft/DeepSpeed) にもあります。 DeepSpeed を使用する場合は、常に DeepSpeed 構成ファイルを指定する必要がありますが、一部の構成パラメータには コマンドライン経由で設定します。微妙な違いについては、このガイドの残りの部分で説明します。 DeepSpeed 構成ファイルがどのようなものかを理解するために、ZeRO ステージ 2 機能を有効にする構成ファイルを次に示します。 オプティマイザー状態の CPU オフロードを含み、`AdamW`オプティマイザーと`WarmupLR`スケジューラーを使用し、混合を有効にします。 `--fp16` が渡された場合の精度トレーニング: ```json { "fp16": { "enabled": "auto", "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "optimizer": { "type": "AdamW", "params": { "lr": "auto", "betas": "auto", "eps": "auto", "weight_decay": "auto" } }, "scheduler": { "type": "WarmupLR", "params": { "warmup_min_lr": "auto", "warmup_max_lr": "auto", "warmup_num_steps": "auto" } }, "zero_optimization": { "stage": 2, "offload_optimizer": { "device": "cpu", "pin_memory": true }, "allgather_partitions": true, "allgather_bucket_size": 2e8, "overlap_comm": true, "reduce_scatter": true, "reduce_bucket_size": 2e8, "contiguous_gradients": true }, "gradient_accumulation_steps": "auto", "gradient_clipping": "auto", "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", } ``` プログラムを実行すると、DeepSpeed は [`Trainer`] から受け取った設定をログに記録します。 コンソールに渡されるため、最終的にどのような設定が渡されたのかを正確に確認できます。 <a id='deepspeed-config-passing'></a> ### Passing Configuration このドキュメントで説明したように、通常、DeepSpeed 設定は json ファイルへのパスとして渡されますが、 トレーニングの設定にコマンド ライン インターフェイスを使用せず、代わりにインスタンスを作成します。 [`Trainer`] via [`TrainingArguments`] その後、`deepspeed` 引数については次のことができます ネストされた `dict` を渡します。これにより、その場で構成を作成でき、それを書き込む必要がありません。 [`TrainingArguments`] に渡す前にファイル システムを変更します。 要約すると、次のことができます。 ```python TrainingArguments(..., deepspeed="/path/to/ds_config.json") ``` または: ```python ds_config_dict = dict(scheduler=scheduler_params, optimizer=optimizer_params) TrainingArguments(..., deepspeed=ds_config_dict) ``` <a id='deepspeed-config-shared'></a> ### Shared Configuration <Tip warning={true}> このセクションは必読です </Tip> [`Trainer`] と DeepSpeed の両方が正しく機能するには、いくつかの設定値が必要です。 したがって、検出が困難なエラーにつながる可能性のある定義の競合を防ぐために、それらを構成することにしました。 [`Trainer`] コマンドライン引数経由。 さらに、一部の構成値はモデルの構成に基づいて自動的に導出されます。 複数の値を手動で調整することを忘れないでください。[`Trainer`] に大部分を任せるのが最善です の設定を行います。 したがって、このガイドの残りの部分では、特別な設定値 `auto` が表示されます。これを設定すると、 正しい値または最も効率的な値に自動的に置き換えられます。これを無視することを自由に選択してください 推奨事項を参照し、値を明示的に設定します。この場合、次の点に十分注意してください。 [`Trainer`] 引数と DeepSpeed 設定は一致します。たとえば、同じものを使用していますか 学習率、バッチサイズ、または勾配累積設定?これらが一致しない場合、トレーニングは非常に失敗する可能性があります 方法を検出するのが難しい。あなたは警告を受けました。 DeepSpeed のみに固有の値や、それに合わせて手動で設定する必要がある値が他にも複数あります。 あなたの要望。 独自のプログラムで、DeepSpeed 構成をマスターとして変更したい場合は、次のアプローチを使用することもできます。 それに基づいて [`TrainingArguments`] を設定します。手順は次のとおりです。 1. マスター構成として使用する DeepSpeed 構成を作成またはロードします 2. 
これらの値に基づいて [`TrainingArguments`] オブジェクトを作成します `scheduler.params.total_num_steps`などの一部の値は次のように計算されることに注意してください。 `train` 中に [`Trainer`] を実行しますが、もちろん自分で計算することもできます。 <a id='deepspeed-zero'></a> ### ZeRO [Zero Redundancy Optimizer (ZeRO)](https://www.deepspeed.ai/tutorials/zero/) は、DeepSpeed の主力製品です。それ 3 つの異なるレベル (段階) の最適化をサポートします。最初のものは、スケーラビリティの観点からはあまり興味深いものではありません。 したがって、このドキュメントではステージ 2 と 3 に焦点を当てます。ステージ 3 は、最新の ZeRO-Infinity の追加によってさらに改善されています。 詳細については、DeepSpeed のドキュメントを参照してください。 構成ファイルの `zero_optimization` セクションは最も重要な部分です ([docs](https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training))。ここで定義します どの ZeRO ステージを有効にするか、そしてそれらをどのように構成するか。各パラメータの説明は、 DeepSpeed のドキュメント。 このセクションは、DeepSpeed 設定を介してのみ設定する必要があります - [`Trainer`] が提供します 同等のコマンドライン引数はありません。 注: 現在、DeepSpeed はパラメーター名を検証しないため、スペルを間違えると、デフォルト設定が使用されます。 スペルが間違っているパラメータ。 DeepSpeed エンジンの起動ログ メッセージを見て、その値を確認できます。 使用するつもりです。 <a id='deepspeed-zero2-config'></a> #### ZeRO-2 Config 以下は、ZeRO ステージ 2 の構成例です。 ```json { "zero_optimization": { "stage": 2, "offload_optimizer": { "device": "cpu", "pin_memory": true }, "allgather_partitions": true, "allgather_bucket_size": 5e8, "overlap_comm": true, "reduce_scatter": true, "reduce_bucket_size": 5e8, "contiguous_gradients": true } } ``` **性能調整:** - `offload_optimizer` を有効にすると、GPU RAM の使用量が削減されます (`"stage": 2` が必要です) - `"overlap_comm": true` は、GPU RAM 使用量の増加とトレードオフして、遅延をすべて削減します。 `overlap_comm`は 4.5x を使用します `allgather_bucket_size`と`reduce_bucket_size`の値。したがって、5e8 に設定されている場合、9GB が必要になります。 フットプリント (`5e8 x 2Bytes x 2 x 4.5`)。したがって、8GB 以下の RAM を搭載した GPU を使用している場合、 OOM エラーが発生した場合は、これらのパラメータを`2e8`程度に減らす必要があり、それには 3.6GB が必要になります。やりたくなるでしょう OOM に達し始めている場合は、より大容量の GPU でも同様です。 - これらのバッファを減らすと、より多くの GPU RAM を利用するために通信速度を犠牲にすることになります。バッファサイズが小さいほど、 通信が遅くなり、他のタスクで使用できる GPU RAM が増えます。したがって、バッチサイズが大きい場合は、 重要なのは、トレーニング時間を少し遅らせることは良いトレードになる可能性があります。 さらに、`deepspeed==0.4.4`には、次のコマンドで有効にできる新しいオプション`round_robin_gradients`が追加されました。 ```json { "zero_optimization": { "round_robin_gradients": true } } ``` これは、きめ細かい勾配パーティショニングによってランク間の CPU メモリへの勾配コピーを並列化する、CPU オフロードのステージ 2 最適化です。パフォーマンスの利点は、勾配累積ステップ (オプティマイザー ステップ間のコピーの増加) または GPU 数 (並列処理の増加) に応じて増加します。 <a id='deepspeed-zero3-config'></a> #### ZeRO-3 Config 以下は、ZeRO ステージ 3 の構成例です。 ```json { "zero_optimization": { "stage": 3, "offload_optimizer": { "device": "cpu", "pin_memory": true }, "offload_param": { "device": "cpu", "pin_memory": true }, "overlap_comm": true, "contiguous_gradients": true, "sub_group_size": 1e9, "reduce_bucket_size": "auto", "stage3_prefetch_bucket_size": "auto", "stage3_param_persistence_threshold": "auto", "stage3_max_live_parameters": 1e9, "stage3_max_reuse_distance": 1e9, "stage3_gather_16bit_weights_on_model_save": true } } ``` モデルまたはアクティベーションが GPU メモリに適合せず、CPU が未使用であるために OOM が発生している場合 `"device": "cpu"` を使用してオプティマイザの状態とパラメータを CPU メモリにメモリオフロードすると、この制限が解決される可能性があります。 CPU メモリにオフロードしたくない場合は、`device`エントリに`cpu`の代わりに`none`を使用します。オフロード先 NVMe については後ほど説明します。 固定メモリは、`pin_memory`を`true`に設定すると有効になります。この機能により、次のようなコストをかけてスループットを向上させることができます。 他のプロセスが使用できるメモリが少なくなります。ピン留めされたメモリは、それを要求した特定のプロセスのために確保されます。 通常、通常の CPU メモリよりもはるかに高速にアクセスされます。 **性能調整:** - `stage3_max_live_parameters`: `1e9` - `stage3_max_reuse_distance`: `1e9` OOM に達した場合は、「stage3_max_live_parameters」と「stage3_max_reuse_ distance」を減らします。影響は最小限に抑えられるはずです アクティブ化チェックポイントを実行しない限り、パフォーマンスに影響します。 `1e9`は約 2GB を消費します。記憶を共有しているのは、 `stage3_max_live_parameters` と `stage3_max_reuse_distance` なので、加算されるものではなく、合計で 2GB になります。 `stage3_max_live_parameters` は、特定の時点で GPU 上に保持する完全なパラメータの数の上限です。 時間。 
「再利用距離」は、パラメータが将来いつ再び使用されるかを判断するために使用する指標です。 `stage3_max_reuse_ distance`を使用して、パラメータを破棄するか保持するかを決定します。パラメータが 近い将来に再び使用される予定 (`stage3_max_reuse_distance`未満) なので、通信を減らすために保持します。 オーバーヘッド。これは、アクティベーション チェックポイントを有効にしている場合に非常に役立ちます。フォワード再計算が行われ、 backward は単一レイヤー粒度を渡し、後方再計算までパラメータを前方再計算に保持したいと考えています。 次の構成値は、モデルの非表示サイズによって異なります。 - `reduce_bucket_size`: `hidden_size*hidden_size` - `stage3_prefetch_bucket_size`: `0.9 * hidden_size * hidden_size` - `stage3_param_persistence_threshold`: `10 * hidden_size` したがって、これらの値を `auto` に設定すると、[`Trainer`] が推奨される値を自動的に割り当てます。 価値観。ただし、もちろん、これらを明示的に設定することもできます。 `stage3_gather_16bit_weights_on_model_save` は、モデルの保存時にモデル fp16 の重み統合を有効にします。大きい モデルと複数の GPU の場合、これはメモリと速度の両方の点で高価な操作です。現在必須となっているのは、 トレーニングを再開する予定です。この制限を取り除き、より便利にする今後のアップデートに注目してください。 フレキシブル。 ZeRO-2 構成から移行している場合は、`allgather_partitions`、`allgather_bucket_size`、および `reduce_scatter`設定パラメータは ZeRO-3 では使用されません。これらを設定ファイルに保存しておくと、 無視される。 - `sub_group_size`: `1e9` `sub_group_size` は、オプティマイザーのステップ中にパラメーターが更新される粒度を制御します。パラメータは次のとおりです。 `sub_group_size` のバケットにグループ化され、各バケットは一度に 1 つずつ更新されます。 NVMeオフロードで使用する場合 したがって、ZeRO-Infinity の `sub_group_size`は、モデルの状態が CPU に出入りする粒度を制御します。 オプティマイザステップ中に NVMe からメモリを取得します。これにより、非常に大規模なモデルの CPU メモリ不足が防止されます。 NVMe オフロードを使用しない場合は、`sub_group_size`をデフォルト値の *1e9* のままにすることができます。変更することもできます 次の場合のデフォルト値: 1. オプティマイザー ステップ中に OOM が発生する: `sub_group_size` を減らして、一時バッファーのメモリ使用量を削減します。 2. オプティマイザー ステップに時間がかかります。`sub_group_size`を増やして、帯域幅の使用率を向上させます。 データバッファの増加。 #### ZeRO-0 Config ステージ 0 と 1 はめったに使用されないため、最後にリストしていることに注意してください。 ステージ 0 では、すべてのタイプのシャーディングを無効にし、DDP として DeepSpeed のみを使用します。次のコマンドでオンにできます。 ```json { "zero_optimization": { "stage": 0 } } ``` これにより、他に何も変更する必要がなく、基本的に ZeRO が無効になります。 #### ZeRO-1 Config ステージ 1 は、ステージ 2 からグラデーション シャーディングを除いたものです。オプティマイザーの状態をシャード化するだけで、処理を少し高速化するためにいつでも試すことができます。 ```json { "zero_optimization": { "stage": 1 } } ``` <a id='deepspeed-nvme'></a> ### NVMe Support ZeRO-Infinity は、GPU と CPU メモリを NVMe メモリで拡張することで、非常に大規模なモデルのトレーニングを可能にします。おかげで スマート パーティショニングおよびタイリング アルゴリズムでは、各 GPU が非常に少量のデータを送受信する必要があります。 オフロードにより、最新の NVMe がトレーニングに利用できる合計メモリ プールをさらに大きくするのに適していることが判明しました。 プロセス。 ZeRO-Infinity には、ZeRO-3 が有効になっている必要があります。 次の設定例では、NVMe がオプティマイザの状態とパラメータの両方をオフロードできるようにします。 ```json { "zero_optimization": { "stage": 3, "offload_optimizer": { "device": "nvme", "nvme_path": "/local_nvme", "pin_memory": true, "buffer_count": 4, "fast_init": false }, "offload_param": { "device": "nvme", "nvme_path": "/local_nvme", "pin_memory": true, "buffer_count": 5, "buffer_size": 1e8, "max_in_cpu": 1e9 }, "aio": { "block_size": 262144, "queue_depth": 32, "thread_count": 1, "single_submit": false, "overlap_events": true }, "overlap_comm": true, "contiguous_gradients": true, "sub_group_size": 1e9, "reduce_bucket_size": "auto", "stage3_prefetch_bucket_size": "auto", "stage3_param_persistence_threshold": "auto", "stage3_max_live_parameters": 1e9, "stage3_max_reuse_distance": 1e9, "stage3_gather_16bit_weights_on_model_save": true }, } ``` オプティマイザの状態とパラメータの両方を NVMe にオフロードするか、どちらか 1 つだけをオフロードするか、まったくオフロードしないかを選択できます。たとえば、次の場合 利用可能な CPU メモリが大量にある場合は、高速になるため、必ず CPU メモリのみにオフロードしてください (ヒント: *"device": "CPU"*)。 [オプティマイザーの状態](https://www.deepspeed.ai/docs/config-json/#optimizer-offloading) と [パラメーター](https://www.deepspeed.ai/docs/config-json/#parameter-offloading)。 `nvme_path`が実際に NVMe であることを確認してください。NVMe は通常のハードドライブまたは SSD で動作しますが、 はるかに遅くなります。高速スケーラブルなトレーニングは、最新の NVMe 転送速度を念頭に置いて設計されました (この時点では 書き込みでは、読み取り最大 3.5 GB/秒、書き込み最大 3 GB/秒のピーク速度が得られます)。 最適な`aio`構成ブロックを見つけるには、ターゲット設定でベンチマークを実行する必要があります。 
[ここで説明](https://github.com/microsoft/DeepSpeed/issues/998)。 <a id='deepspeed-zero2-zero3-performance'></a> #### ZeRO-2 vs ZeRO-3 Performance ZeRO-3 は、他のすべてが同じように構成されている場合、ZeRO-2 よりも遅くなる可能性があります。前者は収集する必要があるためです。 ZeRO-2 の機能に加えてモデルの重み付けを行います。 ZeRO-2 がニーズを満たし、数個の GPU を超えて拡張する必要がない場合 そうすれば、それに固執することを選択することもできます。 ZeRO-3 により、はるかに高いスケーラビリティ容量が可能になることを理解することが重要です スピードを犠牲にして。 ZeRO-3 の構成を調整して、ZeRO-2 に近づけることができます。 - `stage3_param_persistence_threshold` を非常に大きな数値に設定します。たとえば、`6 * hidden_​​size * hidden_​​size` のように、最大​​パラメータよりも大きくなります。これにより、パラメータが GPU に保持されます。 - ZeRO-2 にはそのオプションがないため、`offload_params` をオフにします。 変更しなくても、`offload_params`をオフにするだけでパフォーマンスが大幅に向上する可能性があります。 `stage3_param_persistence_threshold`。もちろん、これらの変更はトレーニングできるモデルのサイズに影響します。それで これらは、ニーズに応じて、スケーラビリティと引き換えに速度を向上させるのに役立ちます。 <a id='deepspeed-zero2-example'></a> #### ZeRO-2 Example 以下は、完全な ZeRO-2 自動構成ファイル `ds_config_zero2.json` です。 ```json { "fp16": { "enabled": "auto", "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "optimizer": { "type": "AdamW", "params": { "lr": "auto", "betas": "auto", "eps": "auto", "weight_decay": "auto" } }, "scheduler": { "type": "WarmupLR", "params": { "warmup_min_lr": "auto", "warmup_max_lr": "auto", "warmup_num_steps": "auto" } }, "zero_optimization": { "stage": 2, "offload_optimizer": { "device": "cpu", "pin_memory": true }, "allgather_partitions": true, "allgather_bucket_size": 2e8, "overlap_comm": true, "reduce_scatter": true, "reduce_bucket_size": 2e8, "contiguous_gradients": true }, "gradient_accumulation_steps": "auto", "gradient_clipping": "auto", "steps_per_print": 2000, "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "wall_clock_breakdown": false } ``` 以下は、手動で設定された完全な ZeRO-2 のすべてが有効な構成ファイルです。ここでは主に、典型的なものを確認するためのものです。 値は次のようになりますが、複数の`auto`設定が含まれる値を使用することを強くお勧めします。 ```json { "fp16": { "enabled": true, "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "optimizer": { "type": "AdamW", "params": { "lr": 3e-5, "betas": [0.8, 0.999], "eps": 1e-8, "weight_decay": 3e-7 } }, "scheduler": { "type": "WarmupLR", "params": { "warmup_min_lr": 0, "warmup_max_lr": 3e-5, "warmup_num_steps": 500 } }, "zero_optimization": { "stage": 2, "offload_optimizer": { "device": "cpu", "pin_memory": true }, "allgather_partitions": true, "allgather_bucket_size": 2e8, "overlap_comm": true, "reduce_scatter": true, "reduce_bucket_size": 2e8, "contiguous_gradients": true }, "steps_per_print": 2000, "wall_clock_breakdown": false } ``` <a id='deepspeed-zero3-example'></a> #### ZeRO-3 Example 以下は、完全な ZeRO-3 自動構成ファイル`ds_config_zero3.json`です。 ```json { "fp16": { "enabled": "auto", "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "optimizer": { "type": "AdamW", "params": { "lr": "auto", "betas": "auto", "eps": "auto", "weight_decay": "auto" } }, "scheduler": { "type": "WarmupLR", "params": { "warmup_min_lr": "auto", "warmup_max_lr": "auto", "warmup_num_steps": "auto" } }, "zero_optimization": { "stage": 3, "offload_optimizer": { "device": "cpu", "pin_memory": true }, "offload_param": { "device": "cpu", "pin_memory": true }, "overlap_comm": true, "contiguous_gradients": true, "sub_group_size": 1e9, "reduce_bucket_size": "auto", "stage3_prefetch_bucket_size": "auto", "stage3_param_persistence_threshold": "auto", "stage3_max_live_parameters": 1e9, "stage3_max_reuse_distance": 1e9, "stage3_gather_16bit_weights_on_model_save": true }, 
"gradient_accumulation_steps": "auto", "gradient_clipping": "auto", "steps_per_print": 2000, "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "wall_clock_breakdown": false } ``` 以下は、手動で設定された完全な ZeRO-3 のすべてが有効な構成ファイルです。ここでは主に、典型的なものを確認するためのものです。 値は次のようになりますが、複数の`auto`設定が含まれる値を使用することを強くお勧めします。 ```json { "fp16": { "enabled": true, "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "optimizer": { "type": "AdamW", "params": { "lr": 3e-5, "betas": [0.8, 0.999], "eps": 1e-8, "weight_decay": 3e-7 } }, "scheduler": { "type": "WarmupLR", "params": { "warmup_min_lr": 0, "warmup_max_lr": 3e-5, "warmup_num_steps": 500 } }, "zero_optimization": { "stage": 3, "offload_optimizer": { "device": "cpu", "pin_memory": true }, "offload_param": { "device": "cpu", "pin_memory": true }, "overlap_comm": true, "contiguous_gradients": true, "sub_group_size": 1e9, "reduce_bucket_size": 1e6, "stage3_prefetch_bucket_size": 0.94e6, "stage3_param_persistence_threshold": 1e4, "stage3_max_live_parameters": 1e9, "stage3_max_reuse_distance": 1e9, "stage3_gather_16bit_weights_on_model_save": true }, "steps_per_print": 2000, "wall_clock_breakdown": false } ``` #### How to Choose Which ZeRO Stage and Offloads To Use For Best Performance これで、さまざまな段階があることがわかりました。どちらを使用するかをどのように決定すればよいでしょうか?このセクションでは、この質問に答えていきます。 一般に、次のことが当てはまります。 - 速度の点(左の方が右より速い) ステージ 0 (DDP) > ステージ 1 > ステージ 2 > ステージ 2 + オフロード > ステージ 3 > ステージ 3 + オフロード - GPU メモリの使用状況 (右は左よりも GPU メモリ効率が高い) ステージ 0 (DDP) < ステージ 1 < ステージ 2 < ステージ 2 + オフロード < ステージ 3 < ステージ 3 + オフロード したがって、最小限の数の GPU に収まりながら最速の実行を実現したい場合は、次のプロセスに従うことができます。最も速いアプローチから開始し、GPU OOM に陥った場合は、次に遅いアプローチに進みますが、これにより使用される GPU メモリが少なくなります。などなど。 まず、バッチ サイズを 1 に設定します (必要な有効バッチ サイズに対して、いつでも勾配累積を使用できます)。 1. `--gradient_checkpointing 1` (HF Trainer) または直接 `model.gradient_checkpointing_enable()` を有効にします - OOM の場合 2. 最初に ZeRO ステージ 2 を試してください。 OOMの場合 3. ZeRO ステージ 2 + `offload_optimizer` を試します - OOM の場合 4. ZeRO ステージ 3 に切り替える - OOM の場合 5. `cpu` に対して `offload_param` を有効にします - OOM の場合 6. OOM の場合は、`cpu`に対して`offload_optimizer`を有効にします。 7. それでもバッチ サイズ 1 に適合しない場合は、まずさまざまなデフォルト値を確認し、可能であれば値を下げます。たとえば、`generate`を使用し、広い検索ビームを使用しない場合は、大量のメモリを消費するため、検索ビームを狭くします。 8. fp32 では必ず混合半精度を使用します。つまり、Ampere 以上の GPU では bf16、古い GPU アーキテクチャでは fp16 を使用します。 9. それでも OOM を行う場合は、ハードウェアを追加するか、ZeRO-Infinity を有効にすることができます。つまり、オフロード `offload_param` と `offload_optimizer` を `nvme` に切り替えます。非常に高速な nvme であることを確認する必要があります。逸話として、ZeRO-Infinity を使用して小さな GPU で BLOOM-176B を推論することができましたが、非常に遅かったです。でも、うまくいきました! 
もちろん、最も GPU メモリ効率の高い構成から始めて、後から逆に進むことで、これらの手順を逆に実行することもできます。あるいは二等分してみてください。 OOM を引き起こさないバッチ サイズ 1 を取得したら、実効スループットを測定します。 次に、バッチ サイズをできるだけ大きくしてみます。バッチ サイズが大きいほど、乗算する行列が巨大な場合に GPU のパフォーマンスが最高になるため、GPU の効率が向上します。 ここで、パフォーマンス最適化ゲームが始まります。一部のオフロード機能をオフにするか、ZeRO 段階でステップダウンしてバッチ サイズを増減して、実効スループットを再度測定することができます。満足するまで洗い流し、繰り返します。 永遠にこれに費やす必要はありませんが、3 か月のトレーニングを開始しようとしている場合は、スループットに関して最も効果的な設定を見つけるために数日かけてください。そのため、トレーニングのコストが最小限になり、トレーニングをより早く完了できます。現在の目まぐるしく変化する ML の世界では、何かをトレーニングするのにさらに 1 か月かかる場合、絶好の機会を逃す可能性があります。もちろん、これは私が意見を共有しているだけであり、決してあなたを急かそうとしているわけではありません。 BLOOM-176B のトレーニングを開始する前に、このプロセスに 2 日間費やし、スループットを 90 TFLOP から 150 TFLOP に向上させることができました。この取り組みにより、トレーニング時間を 1 か月以上節約できました。 これらのメモは主にトレーニング モード用に書かれたものですが、ほとんどの場合は推論にも適用されるはずです。たとえば、勾配チェックポイントはトレーニング中にのみ役立つため、推論中は何も行われません。さらに、マルチ GPU 推論を実行していて、[DeepSpeed-Inference](https://www.deepspeed.ai/tutorials/inference-tutorial/)、[Accelerate](https://ハグフェイス.co/blog/bloom-inference-pytorch-scripts) は優れたパフォーマンスを提供するはずです。 その他のパフォーマンス関連の簡単なメモ: - 何かを最初からトレーニングしている場合は、常に 16 で割り切れる形状のテンソル (隠れたサイズなど) を使用するようにしてください。バッチ サイズについては、少なくとも 2 で割り切れるようにしてください。 GPU からさらに高いパフォーマンスを引き出したい場合は、ハードウェア固有の [波とタイルの量子化](https://developer.nvidia.com/blog/optimizing-gpu-performance-tensor-cores/) の可分性があります。 ### Activation Checkpointing or Gradient Checkpointing アクティベーション チェックポイントと勾配チェックポイントは、同じ方法論を指す 2 つの異なる用語です。とてもややこしいですが、こんな感じです。 勾配チェックポイントを使用すると、速度を GPU メモリと引き換えにできます。これにより、GPU OOM を克服したり、バッチ サイズを増やすことができ、多くの場合、パフォーマンスの向上につながります。 HF Transformers モデルは、DeepSpeed のアクティベーション チェックポイントについて何も知らないため、DeepSpeed 構成ファイルでその機能を有効にしようとしても、何も起こりません。 したがって、この非常に有益な機能を活用するには 2 つの方法があります。 1. HF Transformers モデルを使用したい場合は、`model.gradient_checkpointing_enable()` を実行するか、HF トレーナーで `--gradient_checkpointing` を使用します。これにより、これが自動的に有効になります。そこで使われるのが `torch.utils.checkpoint` です。 2. 
独自のモデルを作成し、DeepSpeed のアクティベーション チェックポイントを使用したい場合は、[そこで規定されている API](https://deepspeed.readthedocs.io/en/latest/activation-checkpointing.html) を使用できます。 HF Transformers モデリング コードを使用して、`torch.utils.checkpoint` を DeepSpeed の API に置き換えることもできます。後者は、順方向アクティベーションを再計算する代わりに CPU メモリにオフロードできるため、より柔軟です。 ### Optimizer and Scheduler `offload_optimizer`を有効にしない限り、DeepSpeed スケジューラーと HuggingFace スケジューラーを組み合わせて使用​​できます。 オプティマイザー (HuggingFace スケジューラーと DeepSpeed オプティマイザーの組み合わせを除く): | Combos | HF Scheduler | DS Scheduler | |:-------------|:-------------|:-------------| | HF Optimizer | Yes | Yes | | DS Optimizer | No | Yes | `offload_optimizer`が有効な場合、CPU と GPU 実装 (LAMB を除く)。 <a id='deepspeed-optimizer'></a> #### Optimizer DeepSpeed の主なオプティマイザーは、Adam、AdamW、OneBitAdam、Lamb です。これらは ZeRO で徹底的にテストされており、 したがって、使用することをお勧めします。ただし、他のオプティマイザを「torch」からインポートすることはできます。完全なドキュメントは [こちら](https://www.deepspeed.ai/docs/config-json/#optimizer-parameters) にあります。 設定ファイルで `optimizer` エントリを設定しない場合、[`Trainer`] は 自動的に`AdamW`に設定され、指定された値または次のコマンドラインのデフォルトが使用されます。 引数: `--learning_rate`、`--adam_beta1`、`--adam_beta2`、`--adam_epsilon`、および `--weight_decay`。 以下は、`AdamW`の自動構成された`optimizer`エントリの例です。 ```json { "optimizer": { "type": "AdamW", "params": { "lr": "auto", "betas": "auto", "eps": "auto", "weight_decay": "auto" } } } ``` コマンドライン引数によって構成ファイル内の値が設定されることに注意してください。これは 1 つあるためです 値の決定的なソースを提供し、たとえば学習率が次のように設定されている場合に、見つけにくいエラーを回避します。 さまざまな場所でさまざまな価値観。コマンドラインのルール。オーバーライドされる値は次のとおりです。 - `lr` と `--learning_rate` の値 - `betas` と `--adam_beta1 --adam_beta2` の値 - `eps` と `--adam_epsilon` の値 - `weight_decay` と `--weight_decay` の値 したがって、コマンドラインで共有ハイパーパラメータを調整することを忘れないでください。 値を明示的に設定することもできます。 ```json { "optimizer": { "type": "AdamW", "params": { "lr": 0.001, "betas": [0.8, 0.999], "eps": 1e-8, "weight_decay": 3e-7 } } } ``` ただし、[`Trainer`] コマンドライン引数と DeepSpeed を自分で同期することになります。 構成。 上記にリストされていない別のオプティマイザーを使用する場合は、トップレベルの構成に追加する必要があります。 ```json { "zero_allow_untested_optimizer": true } ``` `AdamW`と同様に、公式にサポートされている他のオプティマイザーを構成できます。これらは異なる設定値を持つ可能性があることに注意してください。例えばAdam の場合は、`weight_decay`を`0.01`付近にする必要があります。 さらに、オフロードは、Deepspeed の CPU Adam オプティマイザーと併用すると最も効果的に機能します。 `deepspeed==0.8.3` なので、オフロードで別のオプティマイザーを使用したい場合は、以下も追加する必要があります。 ```json { "zero_force_ds_cpu_optimizer": false } ``` 最上位の構成に移行します。 <a id='deepspeed-scheduler'></a> #### Scheduler DeepSpeed は、`LRRangeTest`、`OneCycle`、`WarmupLR`、および`WarmupDecayLR`学習率スケジューラーをサポートしています。完全な ドキュメントは[ここ](https://www.deepspeed.ai/docs/config-json/#scheduler-parameters)です。 ここでは、🤗 Transformers と DeepSpeed の間でスケジューラーが重複する場所を示します。 - `--lr_scheduler_type constant_with_warmup` 経由の `WarmupLR` - `--lr_scheduler_type Linear` を介した `WarmupDecayLR`。これは `--lr_scheduler_type` のデフォルト値でもあります。 したがって、スケジューラを設定しない場合、これがデフォルトで設定されるスケジューラになります。 設定ファイルで `scheduler` エントリを設定しない場合、[`Trainer`] は `--lr_scheduler_type`、`--learning_rate`、および `--warmup_steps` または `--warmup_ratio` の値を設定します。 🤗 それのトランスフォーマーバージョン。 以下は、`WarmupLR`の自動構成された`scheduler`エントリの例です。 ```json { "scheduler": { "type": "WarmupLR", "params": { "warmup_min_lr": "auto", "warmup_max_lr": "auto", "warmup_num_steps": "auto" } } } ``` *"auto"* が使用されているため、[`Trainer`] 引数は設定に正しい値を設定します。 ファイル。これは、値の決定的なソースが 1 つあることと、たとえば次のような場合に見つけにくいエラーを避けるためです。 学習率は、場所ごとに異なる値に設定されます。コマンドラインのルール。設定される値は次のとおりです。 - `warmup_min_lr` の値は `0` です。 - `warmup_max_lr` と `--learning_rate` の値。 - `warmup_num_steps` と `--warmup_steps` の値 (指定されている場合)。それ以外の場合は `--warmup_ratio` を使用します トレーニング ステップの数を乗算し、切り上げます。 - `total_num_steps` には `--max_steps` の値を指定するか、指定されていない場合は実行時に自動的に導出されます。 環境、データセットのサイズ、およびその他のコマンド ライン引数 ( 
`WarmupDecayLR`)。 もちろん、構成値の一部またはすべてを引き継いで、自分で設定することもできます。 ```json { "scheduler": { "type": "WarmupLR", "params": { "warmup_min_lr": 0, "warmup_max_lr": 0.001, "warmup_num_steps": 1000 } } } ``` ただし、[`Trainer`] コマンドライン引数と DeepSpeed を自分で同期することになります。 構成。 たとえば、`WarmupDecayLR`の場合は、次のエントリを使用できます。 ```json { "scheduler": { "type": "WarmupDecayLR", "params": { "last_batch_iteration": -1, "total_num_steps": "auto", "warmup_min_lr": "auto", "warmup_max_lr": "auto", "warmup_num_steps": "auto" } } } ``` `total_num_steps`、`warmup_max_lr`、`warmup_num_steps`、および `total_num_steps` はロード時に設定されます。 <a id='deepspeed-fp32'></a> ### fp32 Precision Deepspeed は、完全な fp32 と fp16 の混合精度をサポートします。 fp16 混合精度を使用すると、必要なメモリが大幅に削減され、速度が向上するため、 使用しているモデルがこのトレーニング モードで適切に動作しない場合は、使用しない方がよいでしょう。通常これ モデルが fp16 混合精度で事前トレーニングされていない場合に発生します (たとえば、これは bf16 で事前トレーニングされた場合によく発生します) モデル)。このようなモデルでは、オーバーフローまたはアンダーフローが発生し、`NaN`損失が発生する可能性があります。これがあなたの場合は、使用したいと思うでしょう 完全な fp32 モード。デフォルトの fp16 混合精度モードを次のように明示的に無効にします。 ```json { "fp16": { "enabled": false, } } ``` Ampere アーキテクチャ ベースの GPU を使用している場合、pytorch バージョン 1.7 以降は自動的に を使用するように切り替わります。 一部の操作でははるかに効率的な tf32 形式を使用しますが、結果は依然として fp32 になります。詳細と ベンチマークについては、[Ampere デバイス上の TensorFloat-32(TF32)](https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices) を参照してください。文書には以下が含まれます 何らかの理由でこの自動変換を使用したくない場合は、この自動変換を無効にする方法について説明します。 🤗 トレーナーでは、`--tf32` を使用して有効にするか、`--tf32 0` または `--no_tf32` を使用して無効にすることができます。デフォルトでは、PyTorch のデフォルトが使用されます。 <a id='deepspeed-amp'></a> ### Automatic Mixed Precision pytorch のような AMP の方法または apex のような方法で自動混合精度を使用できます。 ### fp16 fp16 (float16) を設定して pytorch AMP のようなモードを設定するには: ```json { "fp16": { "enabled": "auto", "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 } } ``` [`Trainer`] は、の値に基づいてそれを自動的に有効または無効にします。 `args.fp16_backend`。残りの設定値はあなた次第です。 このモードは、`--fp16 --fp16_backend amp`または`--fp16_full_eval`コマンドライン引数が渡されると有効になります。 このモードを明示的に有効/無効にすることもできます。 ```json { "fp16": { "enabled": true, "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 } } ``` ただし、[`Trainer`] コマンドライン引数と DeepSpeed を自分で同期することになります。 構成。 これが[ドキュメント](https://www.deepspeed.ai/docs/config-json/#fp16-training-options)です。 ### BF16 fp16 の代わりに bf16 (bfloat16) が必要な場合は、次の構成セクションが使用されます。 ```json { "bf16": { "enabled": "auto" } } ``` bf16 は fp32 と同じダイナミック レンジを備えているため、損失スケーリングは必要ありません。 このモードは、`--bf16` または `--bf16_full_eval` コマンドライン引数が渡されると有効になります。 このモードを明示的に有効/無効にすることもできます。 ```json { "bf16": { "enabled": true } } ``` <Tip> `deepspeed==0.6.0`の時点では、bf16 サポートは新しく実験的なものです。 bf16 が有効な状態で [勾配累積](#gradient-accumulation) を使用する場合は、bf16 で勾配が累積されることに注意する必要があります。この形式の精度が低いため、これは希望どおりではない可能性があります。損失のある蓄積につながります。 この問題を修正し、より高精度の `dtype` (fp16 または fp32) を使用するオプションを提供するための作業が行われています。 </Tip> ### NCCL Collectives 訓練体制の`dtype`があり、さまざまな削減や収集/分散操作などのコミュニケーション集合体に使用される別の`dtype`があります。 すべての収集/分散操作は、データが含まれているのと同じ `dtype` で実行されるため、bf16 トレーニング体制を使用している場合、データは bf16 で収集されます。収集は損失のない操作です。 さまざまなリデュース操作は非常に損失が大きい可能性があります。たとえば、複数の GPU 間で勾配が平均化される場合、通信が fp16 または bf16 で行われる場合、結果は損失が多くなる可能性があります。複数の数値を低精度でアドバタイズすると結果は正確ではないためです。 。 bf16 では fp16 よりも精度が低いため、さらにそうです。通常は非常に小さい grad を平均する際の損失が最小限に抑えられるため、fp16 で十分であることがよくあります。したがって、デフォルトでは、半精度トレーニングでは fp16 がリダクション演算のデフォルトとして使用されます。ただし、この機能を完全に制御でき、必要に応じて小さなオーバーヘッドを追加して、リダクションが累積 dtype として fp32 を使用し、結果の準備ができた場合にのみ半精度 `dtype` にダウンキャストするようにすることもできます。でトレーニング中です。 デフォルトをオーバーライドするには、新しい構成エントリを追加するだけです。 ```json { "communication_data_type": "fp32" } ``` 
この記事の執筆時点での有効な値は、"fp16"、"bfp16"、"fp32"です。 注: ステージ ゼロ 3 には、bf16 通信タイプに関するバグがあり、`deepspeed==0.8.1`で修正されました。 ### apex apex AMP のようなモード セットを設定するには: ```json "amp": { "enabled": "auto", "opt_level": "auto" } ``` [`Trainer`] は `args.fp16_backend` の値に基づいて自動的に設定します。 `args.fp16_opt_level`。 このモードは、`--fp16 --fp16_backend apex --fp16_opt_level 01`コマンド ライン引数が渡されると有効になります。 このモードを明示的に構成することもできます。 ```json { "amp": { "enabled": true, "opt_level": "O1" } } ``` ただし、[`Trainer`] コマンドライン引数と DeepSpeed を自分で同期することになります。 構成。 これは[ドキュメント](https://www.deepspeed.ai/docs/config-json/#automatic-mixed-precision-amp-training-options)です。 <a id='deepspeed-bs'></a> ### Batch Size バッチサイズを設定するには、次を使用します。 ```json { "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto" } ``` [`Trainer`] は自動的に `train_micro_batch_size_per_gpu` を次の値に設定します。 `args.per_device_train_batch_size`と`train_batch_size`を`args.world_size * args.per_device_train_batch_size * args.gradient_accumulation_steps`に変更します。 値を明示的に設定することもできます。 ```json { "train_batch_size": 12, "train_micro_batch_size_per_gpu": 4 } ``` ただし、[`Trainer`] コマンドライン引数と DeepSpeed を自分で同期することになります。 構成。 <a id='deepspeed-grad-acc'></a> ### Gradient Accumulation 勾配累積セットを構成するには: ```json { "gradient_accumulation_steps": "auto" } ``` [`Trainer`] は自動的にそれを `args.gradient_accumulation_steps` の値に設定します。 値を明示的に設定することもできます。 ```json { "gradient_accumulation_steps": 3 } ``` ただし、[`Trainer`] コマンドライン引数と DeepSpeed を自分で同期することになります。 構成。 <a id='deepspeed-grad-clip'></a> ### Gradient Clipping グラデーション グラデーション クリッピング セットを構成するには: ```json { "gradient_clipping": "auto" } ``` [`Trainer`] は自動的にそれを `args.max_grad_norm` の値に設定します。 値を明示的に設定することもできます。 ```json { "gradient_clipping": 1.0 } ``` ただし、[`Trainer`] コマンドライン引数と DeepSpeed を自分で同期することになります。 構成。 <a id='deepspeed-weight-extraction'></a> ### Getting The Model Weights Out トレーニングを継続し、DeepSpeed の使用を再開する限り、何も心配する必要はありません。 DeepSpeed ストア fp32 のカスタム チェックポイント オプティマイザー ファイル内のマスターの重み。これは `global_step*/*optim_states.pt` (これは glob パターン)、通常のチェックポイントの下に保存されます。 **FP16 ウェイト:** モデルを ZeRO-2 で保存すると、モデルの重みを含む通常の `pytorch_model.bin` ファイルが作成されますが、 これらは重みの fp16 バージョンにすぎません。 ZeRO-3 では、モデルの重みが複数の GPU に分割されるため、状況はさらに複雑になります。 したがって、fp16 を保存するための `Trainer` を取得するには、`"stage3_gather_16bit_weights_on_model_save": true` が必要です。 重みのバージョン。この設定が`False`の場合、`pytorch_model.bin`は作成されません。これは、デフォルトで DeepSpeed の `state_dict` に実際の重みではなくプレースホルダーが含まれるためです。この `state_dict` を保存した場合、ロードし直すことはできません。 ```json { "zero_optimization": { "stage3_gather_16bit_weights_on_model_save": true } } ``` **FP32 重量:** fp16 ウェイトはトレーニングを再開するのに適していますが、モデルの微調整が完了し、それを [モデル ハブ](https://huggingface.co/models) にアクセスするか、fp32 を入手したいと思われる他の人に渡します。 重み。これは大量のメモリを必要とするプロセスであるため、トレーニング中に行うべきではないのが理想的です。 したがって、トレーニングの完了後にオフラインで実行するのが最適です。ただし、必要に応じて、空き CPU が十分にある場合は、 同じトレーニング スクリプトで実行できることを思い出してください。次のセクションでは、両方のアプローチについて説明します。 **ライブ FP32 ウェイト リカバリ:** モデルが大きく、トレーニングの終了時に空き CPU メモリがほとんど残っていない場合、このアプローチは機能しない可能性があります。 少なくとも 1 つのチェックポイントを保存していて、最新のチェックポイントを使用したい場合は、次の手順を実行できます。 ```python from transformers.trainer_utils import get_last_checkpoint from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint checkpoint_dir = get_last_checkpoint(trainer.args.output_dir) fp32_model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir) ``` `--load_best_model_at_end` class:*~transformers.TrainingArguments* 引数を使用している場合 (最適なモデルを追跡するため) チェックポイント)、最初に最終モデルを明示的に保存してから、上記と同じことを行うことでトレーニングを終了できます。 ```python from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint checkpoint_dir = 
os.path.join(trainer.args.output_dir, "checkpoint-final") trainer.deepspeed.save_checkpoint(checkpoint_dir) fp32_model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir) ``` <Tip> `load_state_dict_from_zero_checkpoint` が実行されると、`model` はもはや使用できなくなることに注意してください。 同じアプリケーションの DeepSpeed コンテキスト。つまり、deepspeed エンジンを再初期化する必要があります。 `model.load_state_dict(state_dict)` はそこからすべての DeepSpeed マジックを削除します。したがって、これは最後にのみ実行してください トレーニングの様子。 </Tip> もちろん、class:*~transformers.Trainer* を使用する必要はなく、上記の例を独自のものに調整することができます。 トレーナー。 何らかの理由でさらに改良したい場合は、重みの fp32 `state_dict` を抽出して適用することもできます。 次の例に示すように、これらは自分で作成します。 ```python from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu model = model.cpu() model.load_state_dict(state_dict) ``` **オフライン FP32 ウェイト リカバリ:** DeepSpeed は特別な変換スクリプト`zero_to_fp32.py`を作成し、チェックポイントの最上位に配置します。 フォルダ。このスクリプトを使用すると、いつでも重みを抽出できます。スクリプトはスタンドアロンなので、もう必要ありません。 抽出を行うための設定ファイルまたは `Trainer` が必要です。 チェックポイント フォルダーが次のようになっているとします。 ```bash $ ls -l output_dir/checkpoint-1/ -rw-rw-r-- 1 stas stas 1.4K Mar 27 20:42 config.json drwxrwxr-x 2 stas stas 4.0K Mar 25 19:52 global_step1/ -rw-rw-r-- 1 stas stas 12 Mar 27 13:16 latest -rw-rw-r-- 1 stas stas 827K Mar 27 20:42 optimizer.pt -rw-rw-r-- 1 stas stas 231M Mar 27 20:42 pytorch_model.bin -rw-rw-r-- 1 stas stas 623 Mar 27 20:42 scheduler.pt -rw-rw-r-- 1 stas stas 1.8K Mar 27 20:42 special_tokens_map.json -rw-rw-r-- 1 stas stas 774K Mar 27 20:42 spiece.model -rw-rw-r-- 1 stas stas 1.9K Mar 27 20:42 tokenizer_config.json -rw-rw-r-- 1 stas stas 339 Mar 27 20:42 trainer_state.json -rw-rw-r-- 1 stas stas 2.3K Mar 27 20:42 training_args.bin -rwxrw-r-- 1 stas stas 5.5K Mar 27 13:16 zero_to_fp32.py* ``` この例では、DeepSpeed チェックポイント サブフォルダー *global_step1* が 1 つだけあります。したがって、FP32を再構築するには 重みを実行するだけです: ```bash python zero_to_fp32.py . pytorch_model.bin ``` これだよ。 `pytorch_model.bin`には、複数の GPU から統合された完全な fp32 モデルの重みが含まれるようになります。 スクリプトは、ZeRO-2 または ZeRO-3 チェックポイントを自動的に処理できるようになります。 `python zero_to_fp32.py -h` を実行すると、使用方法の詳細が表示されます。 スクリプトは、ファイル`latest`の内容を使用して deepspeed サブフォルダーを自動検出します。 例には`global_step1`が含まれます。 注: 現在、スクリプトには最終的な fp32 モデルの重みの 2 倍の一般 RAM が必要です。 ### ZeRO-3 と Infinity Nuances ZeRO-3 は、パラメータ シャーディング機能の点で ZeRO-2 とは大きく異なります。 ZeRO-Infinity は ZeRO-3 をさらに拡張し、NVMe メモリやその他の複数の速度とスケーラビリティの向上をサポートします。 モデルに特別な変更を加える必要がなくても正常に動作するようにあらゆる努力が払われてきましたが、特定の点では 状況によっては、次の情報が必要になる場合があります。 #### Constructing Massive Models DeepSpeed/ZeRO-3 は、既存の RAM に収まらない可能性のある数兆のパラメータを持つモデルを処理できます。そのような場合、 また、初期化をより高速に実行したい場合は、*deepspeed.zero.Init()* を使用してモデルを初期化します。 コンテキスト マネージャー (関数デコレーターでもあります)。次のようになります。 ```python from transformers import T5ForConditionalGeneration, T5Config import deepspeed with deepspeed.zero.Init(): config = T5Config.from_pretrained("google-t5/t5-small") model = T5ForConditionalGeneration(config) ``` ご覧のとおり、これによりランダムに初期化されたモデルが得られます。 事前トレーニングされたモデルを使用したい場合、`model_class.from_pretrained` は次の条件を満たす限りこの機能を有効にします。 `is_deepspeed_zero3_enabled()` は `True` を返します。これは現在、 [`TrainingArguments`] オブジェクト (渡された DeepSpeed 構成ファイルに ZeRO-3 構成が含まれている場合) セクション。したがって、呼び出しの前に** [`TrainingArguments`] オブジェクトを作成する必要があります。 `from_pretrained`。考えられるシーケンスの例を次に示します。 ```python from transformers import AutoModel, Trainer, TrainingArguments training_args = TrainingArguments(..., deepspeed=ds_config) model = AutoModel.from_pretrained("google-t5/t5-small") trainer = Trainer(model=model, args=training_args, ...) 
``` 公式のサンプル スクリプトを使用していて、コマンド ライン引数に `--deepspeed ds_config.json` が含まれている場合 ZeRO-3 設定を有効にすると、これがサンプル スクリプトの記述方法であるため、すべてがすでに完了しています。 注: モデルの fp16 重みが単一の GPU のメモリに収まらない場合は、この機能を使用する必要があります。 この方法とその他の関連機能の詳細については、[大規模モデルの構築](https://deepspeed.readthedocs.io/en/latest/zero3.html#constructing-massive-models) を参照してください。 また、fp16 で事前訓練されたモデルをロードするときは、`from_pretrained` に使用するように指示する必要があります。 `torch_dtype=torch.float16`。詳細については、[from_pretrained-torch-dtype](#from_pretrained-torch-dtype) を参照してください。 #### Gathering Parameters 複数の GPU 上の ZeRO-3 では、現在の GPU のパラメータでない限り、単一の GPU がすべてのパラメータを持つことはありません。 実行層。したがって、すべてのレイヤーのすべてのパラメーターに一度にアクセスする必要がある場合は、それを行うための特定の方法があります。 ほとんどの場合は必要ありませんが、必要な場合は、[パラメータの収集](https://deepspeed.readthedocs.io/en/latest/zero3.html#manual-parameter-coordination) を参照してください。 ただし、いくつかの場所で内部的に使用しています。その例の 1 つは、事前トレーニングされたモデルの重みをロードするときです。 `from_pretrained`。一度に 1 つのレイヤーをロードし、参加しているすべての GPU に即座に分割します。 大規模なモデルでは、メモリの関係で、1 つの GPU にロードしてから複数の GPU に分散することはできません。 制限。 また、ZeRO-3 では、独自のコードを作成し、次のようなモデル パラメーターの重みが発生するとします。 ```python tensor([1.0], device="cuda:0", dtype=torch.float16, requires_grad=True) ``` `tensor([1.])` にストレスを感じた場合、またはパラメータのサイズが `1` であるというエラーが発生した場合 より大きな多次元形状。これは、パラメーターが分割されており、表示されるのは ZeRO-3 プレースホルダーであることを意味します。 <a id='deepspeed-zero-inference'></a> ### ZeRO Inference ZeRO Inference は、ZeRO-3 Training と同じ構成を使用します。オプティマイザーとスケジューラーのセクションは必要ありません。で 実際、同じものをトレーニングと共有したい場合は、これらを設定ファイルに残すことができます。彼らはただそうなるだろう 無視されました。 それ以外の場合は、通常の [`TrainingArguments`] 引数を渡すだけです。例えば: ```bash deepspeed --num_gpus=2 your_program.py <normal cl args> --do_eval --deepspeed ds_config.json ``` 唯一重要なことは、ZeRO-2 には何の利点もないため、ZeRO-3 構成を使用する必要があるということです。 ZeRO-3 のみがパラメーターのシャーディングを実行するのに対し、ZeRO-1 は勾配とオプティマイザーの状態をシャーディングするため、推論に役立ちます。 以下は、利用可能なすべての GPU をデプロイする DeepSpeed で`run_translation.py`を実行する例です。 ```bash deepspeed examples/pytorch/translation/run_translation.py \ --deepspeed tests/deepspeed/ds_config_zero3.json \ --model_name_or_path google-t5/t5-small --output_dir output_dir \ --do_eval --max_eval_samples 50 --warmup_steps 50 \ --max_source_length 128 --val_max_target_length 128 \ --overwrite_output_dir --per_device_eval_batch_size 4 \ --predict_with_generate --dataset_config "ro-en" --fp16 \ --source_lang en --target_lang ro --dataset_name wmt16 \ --source_prefix "translate English to Romanian: " ``` 推論のために、オプティマイザーの状態と勾配によって使用される追加の大きなメモリは必要ないため、 はるかに大きなバッチやシーケンス長を同じハードウェアに適合できる必要があります。 さらに、DeepSpeed は現在、Deepspeed-Inference と呼ばれる関連製品を開発していますが、これとは何の関係もありません。 ZeRO テクノロジーに準拠していますが、代わりにテンソル並列処理を使用して、単一の GPU に収まらないモデルをスケーリングします。これは 現在開発中です。製品が完成したら統合を提供する予定です。 ### Memory Requirements Deepspeed ZeRO はメモリを CPU (および NVMe) にオフロードできるため、フレームワークは、使用されている GPU の数に応じて必要な CPU および GPU メモリの量を知ることができるユーティリティを提供します。 単一の GPU で `bigscience/T0_3B`を微調整するために必要なメモリの量を見積もってみましょう。 ```bash $ python -c 'from transformers import AutoModel; \ from deepspeed.runtime.zero.stage3 import estimate_zero3_model_states_mem_needs_all_live; \ model = AutoModel.from_pretrained("bigscience/T0_3B"); \ estimate_zero3_model_states_mem_needs_all_live(model, num_gpus_per_node=1, num_nodes=1)' [...] Estimated memory needed for params, optim states and gradients for a: HW: Setup with 1 node, 1 GPU per node. SW: Model with 2783M total params, 65M largest layer params. 
per CPU | per GPU | Options 70.00GB | 0.25GB | offload_param=cpu , offload_optimizer=cpu , zero_init=1 70.00GB | 0.25GB | offload_param=cpu , offload_optimizer=cpu , zero_init=0 62.23GB | 5.43GB | offload_param=none, offload_optimizer=cpu , zero_init=1 62.23GB | 5.43GB | offload_param=none, offload_optimizer=cpu , zero_init=0 0.37GB | 46.91GB | offload_param=none, offload_optimizer=none, zero_init=1 15.56GB | 46.91GB | offload_param=none, offload_optimizer=none, zero_init=0 ``` したがって、単一の 80 GB GPU で CPU オフロードなしで搭載することも、小さな 8 GB GPU でも最大 60 GB の CPU メモリが必要になることも可能です。 (これはパラメータ、オプティマイザの状態、および勾配のためのメモリであることに注意してください。cuda カーネル、アクティベーション、および一時メモリにはもう少し多くのメモリが必要です。) 次に、コストと速度のトレードオフになります。より小さい GPU を購入またはレンタルした方が安くなります (Deepspeed ZeRO では複数の GPU を使用できるため、GPU の数を減らすこともできます)。しかし、その場合は遅くなります。そのため、何かを実行する速度を気にしなくても、速度の低下は GPU の使用時間に直接影響し、コストが増大するため、どれが最も効果的かを実験して比較してください。 十分な GPU メモリがある場合は、すべてが高速になるため、CPU/NVMe オフロードを必ず無効にしてください。 たとえば、2 つの GPU に対して同じことを繰り返してみましょう。 ```bash $ python -c 'from transformers import AutoModel; \ from deepspeed.runtime.zero.stage3 import estimate_zero3_model_states_mem_needs_all_live; \ model = AutoModel.from_pretrained("bigscience/T0_3B"); \ estimate_zero3_model_states_mem_needs_all_live(model, num_gpus_per_node=2, num_nodes=1)' [...] Estimated memory needed for params, optim states and gradients for a: HW: Setup with 1 node, 2 GPUs per node. SW: Model with 2783M total params, 65M largest layer params. per CPU | per GPU | Options 70.00GB | 0.25GB | offload_param=cpu , offload_optimizer=cpu , zero_init=1 70.00GB | 0.25GB | offload_param=cpu , offload_optimizer=cpu , zero_init=0 62.23GB | 2.84GB | offload_param=none, offload_optimizer=cpu , zero_init=1 62.23GB | 2.84GB | offload_param=none, offload_optimizer=cpu , zero_init=0 0.74GB | 23.58GB | offload_param=none, offload_optimizer=none, zero_init=1 31.11GB | 23.58GB | offload_param=none, offload_optimizer=none, zero_init=0 ``` したがって、ここでは、CPU にオフロードせずに 2x 32GB 以上の GPU が必要になります。 詳細については、[メモリ推定ツール](https://deepspeed.readthedocs.io/en/latest/memory.html) を参照してください。 ### Filing Issues ここでは、問題の真相をすぐに解明し、作業のブロックを解除できるよう、問題を報告する方法を説明します。 レポートには必ず次の内容を含めてください。 1. レポート内の完全な Deepspeed 構成ファイル 2. [`Trainer`] を使用している場合はコマンドライン引数、または トレーナーのセットアップを自分でスクリプト作成している場合は、[`TrainingArguments`] 引数。しないでください [`TrainingArguments`] には無関係なエントリが多数含まれているため、ダンプします。 3. 次の出力: ```bash python -c 'import torch; print(f"torch: {torch.__version__}")' python -c 'import transformers; print(f"transformers: {transformers.__version__}")' python -c 'import deepspeed; print(f"deepspeed: {deepspeed.__version__}")' ``` 4. 可能であれば、問題を再現できる Google Colab ノートブックへのリンクを含めてください。これを使えます [ノートブック](https://github.com/stas00/porting/blob/master/transformers/deepspeed/DeepSpeed_on_colab_CLI.ipynb) として 出発点。 5. 不可能でない限り、カスタムデータセットではなく、常に使用できる標準データセットを使用してください。 6. 
可能であれば、既存の [サンプル](https://github.com/huggingface/transformers/tree/main/examples/pytorch) のいずれかを使用して問題を再現してみてください。 - Deepspeed が問題の原因ではないことがよくあります。 提出された問題の一部は、Deepspeed とは無関係であることが判明しました。それは、Deepspeed がセットアップから削除された後です。 問題はまだ残っていた。 したがって、完全に明白でない場合は、DeepSpeed 関連の問題です。 例外が発生し、DeepSpeed モジュールが関係していることがわかります。まず、DeepSpeed を含まないセットアップを再テストしてください。 問題が解決しない場合にのみ、Deepspeed について言及し、必要な詳細をすべて提供してください。 - 問題が統合部分ではなく DeepSpeed コアにあることが明らかな場合は、問題を提出してください。 [Deepspeed](https://github.com/microsoft/DeepSpeed/) を直接使用します。よくわからない場合でも、ご安心ください。 どちらの問題トラッカーでも問題ありません。投稿されたらそれを判断し、次の場合は別の問題トラッカーにリダイレクトします。 そうである必要がある。 ### Troubleshooting #### the `deepspeed` process gets killed at startup without a traceback `deepspeed`プロセスが起動時にトレースバックなしで強制終了された場合、それは通常、プログラムが試行したことを意味します。 システムが持っているよりも多くの CPU メモリを割り当てるか、プロセスが割り当てを許可されているため、OS カーネルがそれを強制終了します。 プロセス。これは、設定ファイルに `offload_optimizer` または `offload_param` が含まれている可能性が高いためです。 どちらも`cpu`にオフロードするように設定されています。 NVMe を使用している場合は、次の環境で実行している場合は NVMe へのオフロードを試してください。 ゼロ-3。 [特定のモデルに必要なメモリ量を見積もる]方法は次のとおりです(https://deepspeed.readthedocs.io/en/latest/memory.html)。 #### training and/or eval/predict loss is `NaN` これは、bf16 混合精度モードで事前トレーニングされたモデルを取得し、それを fp16 (混合精度の有無にかかわらず) で使用しようとした場合によく発生します。 TPU でトレーニングされたほとんどのモデル、および多くの場合、Google によってリリースされたモデルは、このカテゴリに分類されます (たとえば、ほぼすべての t5 ベースのモデル)。ここでの解決策は、ハードウェアがサポートしている場合 (TPU、Ampere GPU 以降)、fp32 または bf16 を使用することです。 ```json { "fp16": { "enabled": "auto", "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 } } ``` ログには、Deepspeed が次のように`OVERFLOW!`を報告していることがわかります。 ``` 0%| | 0/189 [00:00<?, ?it/s] [deepscale] OVERFLOW! Rank 0 Skipping step. Attempted loss scale: 262144, reducing to 262144 1%|▌ | 1/189 [00:00<01:26, 2.17it/s] [deepscale] OVERFLOW! Rank 0 Skipping step. Attempted loss scale: 262144, reducing to 131072.0 1%|█▏ [...] [deepscale] OVERFLOW! Rank 0 Skipping step. Attempted loss scale: 1, reducing to 1 14%|████████████████▌ | 27/189 [00:14<01:13, 2.21it/s] [deepscale] OVERFLOW! Rank 0 Skipping step. Attempted loss scale: 1, reducing to 1 15%|█████████████████▏ | 28/189 [00:14<01:13, 2.18it/s] [deepscale] OVERFLOW! Rank 0 Skipping step. Attempted loss scale: 1, reducing to 1 15%|█████████████████▊ | 29/189 [00:15<01:13, 2.18it/s] [deepscale] OVERFLOW! Rank 0 Skipping step. Attempted loss scale: 1, reducing to 1 [...] 
``` これは、Deepspeed 損失スケーラーが損失オーバーフローを克服するスケーリング係数を見つけられないことを意味します。 (ログはここで読みやすくするためにマッサージされています。) この場合、通常は `initial_scale_power` の値を上げる必要があります。通常、`initial_scale_power: 32` に設定すると問題が解決します。 ### Notes - DeepSpeed には pip でインストール可能な PyPI パッケージがありますが、ハードウェアに最も適合するように、また有効にする必要がある場合は、[ソース](https://github.com/microsoft/deepspeed#installation) からインストールすることを強くお勧めします。 1 ビット Adam などの特定の機能は、pypi ディストリビューションでは利用できません。 - 🤗 Transformers で DeepSpeed を使用するために [`Trainer`] を使用する必要はありません - 任意のモデルを使用できます 後者は [DeepSpeed 統合手順](https://www.deepspeed.ai/getting-started/#writing-deepspeed-models) に従って調整する必要があります。 ## Non-Trainer Deepspeed Integration [`~integrations.HfDeepSpeedConfig`] は、Deepspeed を 🤗 Transformers コアに統合するために使用されます [`Trainer`] を使用しない場合の機能。実行する唯一のことは、Deepspeed ZeRO-3 パラメータ収集を処理し、`from_pretrained`呼び出し中にモデルを複数の GPU に自動的に分割することです。それ以外はすべて自分で行う必要があります。 [`Trainer`] を使用すると、すべてが自動的に処理されます。 [`Trainer`] を使用しない場合、DeepSpeed ZeRO-3 を効率的に導入するには、 モデルをインスタンス化する前に [`~integrations.HfDeepSpeedConfig`] オブジェクトを削除し、そのオブジェクトを生きたままにします。 Deepspeed ZeRO-1 または ZeRO-2 を使用している場合は、`HfDeepSpeedConfig`を使用する必要はまったくありません。 たとえば、事前トレーニングされたモデルの場合は次のようになります。 ```python from transformers.integrations import HfDeepSpeedConfig from transformers import AutoModel import deepspeed ds_config = {...} # deepspeed config object or path to the file # must run before instantiating the model to detect zero 3 dschf = HfDeepSpeedConfig(ds_config) # keep this object alive model = AutoModel.from_pretrained("openai-community/gpt2") engine = deepspeed.initialize(model=model, config_params=ds_config, ...) ``` または、事前トレーニングされていないモデルの場合: ```python from transformers.integrations import HfDeepSpeedConfig from transformers import AutoModel, AutoConfig import deepspeed ds_config = {...} # deepspeed config object or path to the file # must run before instantiating the model to detect zero 3 dschf = HfDeepSpeedConfig(ds_config) # keep this object alive config = AutoConfig.from_pretrained("openai-community/gpt2") model = AutoModel.from_config(config) engine = deepspeed.initialize(model=model, config_params=ds_config, ...) ``` [`Trainer`] 統合を使用していない場合は、完全に独力で行うことになることに注意してください。基本的には、[Deepspeed](https://www.deepspeed.ai/) Web サイトのドキュメントに従ってください。また、設定ファイルを明示的に設定する必要があります。`"auto"`値は使用できず、代わりに実際の値を入力する必要があります。 ## HfDeepSpeedConfig [[autodoc]] integrations.HfDeepSpeedConfig - all ### Custom DeepSpeed ZeRO Inference 以下は、単一の GPU にモデルを適合できない場合に、[`Trainer`] を使用せずに DeepSpeed ZeRO 推論を実行する方法の例です。解決策には、追加の GPU の使用、または GPU メモリを CPU メモリにオフロードすることが含まれます。 ここで理解すべき重要なニュアンスは、ZeRO の設計方法により、異なる GPU で異なる入力を並行して処理できるということです。 この例には大量のメモがあり、自己文書化されています。 必ず次のことを行ってください。 1. 十分な GPU メモリがある場合は、CPU オフロードを無効にします (速度が低下するため)。 2. Ampere または新しい GPU を所有している場合は、処理を高速化するために bf16 を有効にします。そのハードウェアがない場合は、bf16 混合精度で事前トレーニングされたモデル (ほとんどの t5 モデルなど) を使用しない限り、fp16 を有効にすることができます。これらは通常、fp16 でオーバーフローし、出力としてガベージが表示されます。 ```python #!/usr/bin/env python # This script demonstrates how to use Deepspeed ZeRO in an inference mode when one can't fit a model # into a single GPU # # 1. Use 1 GPU with CPU offload # 2. Or use multiple GPUs instead # # First you need to install deepspeed: pip install deepspeed # # Here we use a 3B "bigscience/T0_3B" model which needs about 15GB GPU RAM - so 1 largish or 2 # small GPUs can handle it. or 1 small GPU and a lot of CPU memory. # # To use a larger model like "bigscience/T0" which needs about 50GB, unless you have an 80GB GPU - # you will need 2-4 gpus. And then you can adapt the script to handle more gpus if you want to # process multiple inputs at once. 
# # The provided deepspeed config also activates CPU memory offloading, so chances are that if you # have a lot of available CPU memory and you don't mind a slowdown you should be able to load a # model that doesn't normally fit into a single GPU. If you have enough GPU memory the program will # run faster if you don't want offload to CPU - so disable that section then. # # To deploy on 1 gpu: # # deepspeed --num_gpus 1 t0.py # or: # python -m torch.distributed.run --nproc_per_node=1 t0.py # # To deploy on 2 gpus: # # deepspeed --num_gpus 2 t0.py # or: # python -m torch.distributed.run --nproc_per_node=2 t0.py from transformers import AutoTokenizer, AutoConfig, AutoModelForSeq2SeqLM from transformers.integrations import HfDeepSpeedConfig import deepspeed import os import torch os.environ["TOKENIZERS_PARALLELISM"] = "false" # To avoid warnings about parallelism in tokenizers # distributed setup local_rank = int(os.getenv("LOCAL_RANK", "0")) world_size = int(os.getenv("WORLD_SIZE", "1")) torch.cuda.set_device(local_rank) deepspeed.init_distributed() model_name = "bigscience/T0_3B" config = AutoConfig.from_pretrained(model_name) model_hidden_size = config.d_model # batch size has to be divisible by world_size, but can be bigger than world_size train_batch_size = 1 * world_size # ds_config notes # # - enable bf16 if you use Ampere or higher GPU - this will run in mixed precision and will be # faster. # # - for older GPUs you can enable fp16, but it'll only work for non-bf16 pretrained models - e.g. # all official t5 models are bf16-pretrained # # - set offload_param.device to "none" or completely remove the `offload_param` section if you don't # - want CPU offload # # - if using `offload_param` you can manually finetune stage3_param_persistence_threshold to control # - which params should remain on gpus - the larger the value the smaller the offload size # # For in-depth info on Deepspeed config see # https://huggingface.co/docs/transformers/main/main_classes/deepspeed # keeping the same format as json for consistency, except it uses lower case for true/false # fmt: off ds_config = { "fp16": { "enabled": False }, "bf16": { "enabled": False }, "zero_optimization": { "stage": 3, "offload_param": { "device": "cpu", "pin_memory": True }, "overlap_comm": True, "contiguous_gradients": True, "reduce_bucket_size": model_hidden_size * model_hidden_size, "stage3_prefetch_bucket_size": 0.9 * model_hidden_size * model_hidden_size, "stage3_param_persistence_threshold": 10 * model_hidden_size }, "steps_per_print": 2000, "train_batch_size": train_batch_size, "train_micro_batch_size_per_gpu": 1, "wall_clock_breakdown": False } # fmt: on # next line instructs transformers to partition the model directly over multiple gpus using # deepspeed.zero.Init when model's `from_pretrained` method is called. # # **it has to be run before loading the model AutoModelForSeq2SeqLM.from_pretrained(model_name)** # # otherwise the model will first be loaded normally and only partitioned at forward time which is # less efficient and when there is little CPU RAM may fail dschf = HfDeepSpeedConfig(ds_config) # keep this object alive # now a model can be loaded. model = AutoModelForSeq2SeqLM.from_pretrained(model_name) # initialise Deepspeed ZeRO and store only the engine object ds_engine = deepspeed.initialize(model=model, config_params=ds_config)[0] ds_engine.module.eval() # inference # Deepspeed ZeRO can process unrelated inputs on each GPU. So for 2 gpus you process 2 inputs at once. # If you use more GPUs adjust for more. 
# And of course if you have just one input to process you then need to pass the same string to both gpus # If you use only one GPU, then you will have only rank 0. rank = torch.distributed.get_rank() if rank == 0: text_in = "Is this review positive or negative? Review: this is the best cast iron skillet you will ever buy" elif rank == 1: text_in = "Is this review positive or negative? Review: this is the worst restaurant ever" tokenizer = AutoTokenizer.from_pretrained(model_name) inputs = tokenizer.encode(text_in, return_tensors="pt").to(device=local_rank) with torch.no_grad(): outputs = ds_engine.module.generate(inputs, synced_gpus=True) text_out = tokenizer.decode(outputs[0], skip_special_tokens=True) print(f"rank{rank}:\n in={text_in}\n out={text_out}") ``` それを`t0.py`として保存して実行しましょう。 ```bash $ deepspeed --num_gpus 2 t0.py rank0: in=Is this review positive or negative? Review: this is the best cast iron skillet you will ever buy out=Positive rank1: in=Is this review positive or negative? Review: this is the worst restaurant ever out=negative ``` これは非常に基本的な例であり、ニーズに合わせて調整してください。 ### `generate` nuances ZeRO Stage-3 で複数の GPU を使用する場合、`generate(..., synced_gpus=True)`を呼び出して GPU を同期する必要があります。これを行わないと、1 つの GPU が他の GPU より先に生成を終了した場合、残りの GPU が生成を停止した GPU からウェイトのシャードを受信できなくなるため、システム全体がハングします。 `transformers>=4.28` 以降、`synced_gpus` が明示的に指定されていない場合、これらの条件が検出されると自動的に `True` に設定されます。ただし、必要に応じて `synced_gpus` の値をオーバーライドすることもできます。 ## Deepspeed 統合のテスト DeepSpeed 統合を含む PR を送信する場合は、CircleCI PR CI セットアップには GPU がないことに注意してください。そのため、GPU を必要とするテストは別の CI で毎晩のみ実行されます。したがって、PR で緑色の CI レポートが表示されても、DeepSpeed テストが合格したことを意味するわけではありません。 DeepSpeed テストを実行するには、少なくとも以下を実行してください。 ```bash RUN_SLOW=1 pytest tests/deepspeed/test_deepspeed.py ``` モデリングまたは pytorch サンプル コードのいずれかを変更した場合は、Model Zoo テストも実行します。以下はすべての DeepSpeed テストを実行します。 ```bash RUN_SLOW=1 pytest tests/deepspeed ``` ## Main DeepSpeed Resources - [プロジェクトの github](https://github.com/microsoft/deepspeed) - [使用方法ドキュメント](https://www.deepspeed.ai/getting-started/) - [API ドキュメント](https://deepspeed.readthedocs.io/en/latest/index.html) - [ブログ投稿](https://www.microsoft.com/en-us/research/search/?q=deepspeed) 論文: - [ZeRO: 兆パラメータ モデルのトレーニングに向けたメモリの最適化](https://arxiv.org/abs/1910.02054) - [ZeRO-Offload: 10 億規模のモデル トレーニングの民主化](https://arxiv.org/abs/2101.06840) - [ZeRO-Infinity: 極限スケールの深層学習のための GPU メモリの壁を打ち破る](https://arxiv.org/abs/2104.07857) 最後に、HuggingFace [`Trainer`] は DeepSpeed のみを統合していることを覚えておいてください。 DeepSpeed の使用に関して問題や質問がある場合は、[DeepSpeed GitHub](https://github.com/microsoft/DeepSpeed/issues) に問題を提出してください。
transformers/docs/source/ja/main_classes/deepspeed.md/0
{ "file_path": "transformers/docs/source/ja/main_classes/deepspeed.md", "repo_id": "transformers", "token_count": 49427 }
270
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # ALIGN ## 概要 ALIGNモデルは、「[Scaling Up Visual and Vision-Language Representation Learning With Noisy Text Supervision](https://arxiv.org/abs/2102.05918)」という論文でChao Jia、Yinfei Yang、Ye Xia、Yi-Ting Chen、Zarana Parekh、Hieu Pham、Quoc V. Le、Yunhsuan Sung、Zhen Li、Tom Duerigによって提案されました。ALIGNはマルチモーダルな視覚言語モデルです。これは画像とテキストの類似度や、ゼロショット画像分類に使用できます。ALIGNは[EfficientNet](efficientnet)を視覚エンコーダーとして、[BERT](bert)をテキストエンコーダーとして搭載したデュアルエンコーダー構造を特徴とし、対照学習によって視覚とテキストの表現を整合させることを学びます。それまでの研究とは異なり、ALIGNは巨大でノイジーなデータセットを活用し、コーパスのスケールを利用して単純な方法ながら最先端の表現を達成できることを示しています。 論文の要旨は以下の通りです: *事前学習された表現は、多くの自然言語処理(NLP)および知覚タスクにとって重要になっています。NLPにおける表現学習は、人間のアノテーションのない生のテキストでの学習へと移行していますが、視覚および視覚言語の表現は依然として精巧な学習データセットに大きく依存しており、これは高価であったり専門知識を必要としたりします。視覚アプリケーションの場合、ImageNetやOpenImagesのような明示的なクラスラベルを持つデータセットを使用して学習されることがほとんどです。視覚言語の場合、Conceptual Captions、MSCOCO、CLIPなどの人気のあるデータセットはすべて、それぞれ無視できないデータ収集(およびクリーニング)プロセスを含みます。このコストのかかるキュレーションプロセスはデータセットのサイズを制限し、訓練されたモデルのスケーリングを妨げます。本論文では、Conceptual Captionsデータセットの高価なフィルタリングや後処理ステップなしで得られた、10億を超える画像alt-textペアのノイズの多いデータセットを活用します。シンプルなデュアルエンコーダーアーキテクチャは、対照損失を使用して画像とテキストペアの視覚的および言語的表現を整合させることを学習します。我々は、コーパスの規模がそのノイズを補い、このような単純な学習スキームでも最先端の表現につながることを示します。我々の視覚表現は、ImageNetやVTABなどの分類タスクへの転移において強力な性能を発揮します。整合した視覚的および言語的表現は、ゼロショット画像分類を可能にし、また、より洗練されたクロスアテンションモデルと比較しても、Flickr30KおよびMSCOCO画像テキスト検索ベンチマークにおいて新たな最先端の結果を達成します。また、これらの表現は、複雑なテキストおよびテキスト+画像のクエリを用いたクロスモーダル検索を可能にします。* このモデルは[Alara Dirik](https://huggingface.co/adirik)により提供されました。 オリジナルのコードは公開されておらず、この実装は元論文に基づいたKakao Brainの実装をベースにしています。 ## 使用例 ALIGNはEfficientNetを使用して視覚的特徴を、BERTを使用してテキスト特徴を取得します。テキストと視覚の両方の特徴は、同一の次元を持つ潜在空間に射影されます。射影された画像とテキスト特徴間のドット積が類似度スコアとして使用されます。 [`AlignProcessor`]は、テキストのエンコードと画像の前処理を両方行うために、[`EfficientNetImageProcessor`]と[`BertTokenizer`]を単一のインスタンスにラップします。以下の例は、[`AlignProcessor`]と[`AlignModel`]を使用して画像-テキスト類似度スコアを取得する方法を示しています。 ```python import requests import torch from PIL import Image from transformers import AlignProcessor, AlignModel processor = AlignProcessor.from_pretrained("kakaobrain/align-base") model = AlignModel.from_pretrained("kakaobrain/align-base") url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) candidate_labels = ["an image of a cat", "an image of a dog"] inputs = processor(text=candidate_labels, images=image, return_tensors="pt") with torch.no_grad(): outputs = model(**inputs) # this is the image-text similarity score logits_per_image = outputs.logits_per_image # we can take the softmax to get the label probabilities probs = logits_per_image.softmax(dim=1) print(probs) ``` ## 参考資料 ALIGNの使用を開始するのに役立つ公式のHugging Faceとコミュニティ(🌎で示されている)の参考資料の一覧です。 - [ALIGNとCOYO-700Mデータセット](https://huggingface.co/blog/vit-align)に関するブログ投稿。 - ゼロショット画像分類[デモ](https://huggingface.co/spaces/adirik/ALIGN-zero-shot-image-classification)。 
- `kakaobrain/align-base` モデルの[モデルカード](https://huggingface.co/kakaobrain/align-base)。 ここに参考資料を提出したい場合は、気兼ねなくPull Requestを開いてください。私たちはそれをレビューいたします!参考資料は、既存のものを複製するのではなく、何か新しいことを示すことが理想的です。 ## AlignConfig [[autodoc]] AlignConfig - from_text_vision_configs ## AlignTextConfig [[autodoc]] AlignTextConfig ## AlignVisionConfig [[autodoc]] AlignVisionConfig ## AlignProcessor [[autodoc]] AlignProcessor ## AlignModel [[autodoc]] AlignModel - forward - get_text_features - get_image_features ## AlignTextModel [[autodoc]] AlignTextModel - forward ## AlignVisionModel [[autodoc]] AlignVisionModel - forward
transformers/docs/source/ja/model_doc/align.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/align.md", "repo_id": "transformers", "token_count": 2911 }
271
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # BioGPT ## Overview BioGPT モデルは、[BioGPT: generative pre-trained transformer for biomedical text generation and mining](https://academic.oup.com/bib/advance-article/doi/10.1093/bib/bbac409/6713511?guestAccessKey=a66d9b5d-4f83-4017-bb52-405815c907b9) by Renqian Luo、Liai Sun、Yingce Xia、 Tao Qin、Sheng Zhang、Hoifung Poon、Tie-Yan Liu。 BioGPT は、生物医学テキストの生成とマイニングのための、ドメイン固有の生成事前トレーニング済み Transformer 言語モデルです。 BioGPT は、Transformer 言語モデルのバックボーンに従い、1,500 万の PubMed 抄録で最初から事前トレーニングされています。 論文の要約は次のとおりです。 *事前トレーニング済み言語モデルは、一般的な自然言語領域での大きな成功に触発されて、生物医学領域でますます注目を集めています。一般言語ドメインの事前トレーニング済み言語モデルの 2 つの主なブランチ、つまり BERT (およびそのバリアント) と GPT (およびそのバリアント) のうち、1 つ目は BioBERT や PubMedBERT などの生物医学ドメインで広く研究されています。これらはさまざまな下流の生物医学的タスクで大きな成功を収めていますが、生成能力の欠如により応用範囲が制限されています。この論文では、大規模な生物医学文献で事前トレーニングされたドメイン固有の生成 Transformer 言語モデルである BioGPT を提案します。私たちは 6 つの生物医学的自然言語処理タスクで BioGPT を評価し、ほとんどのタスクで私たちのモデルが以前のモデルよりも優れていることを実証しました。特に、BC5CDR、KD-DTI、DDI のエンドツーエンド関係抽出タスクではそれぞれ 44.98%、38.42%、40.76% の F1 スコアを獲得し、PubMedQA では 78.2% の精度を獲得し、新記録を樹立しました。テキスト生成に関する私たちのケーススタディは、生物医学文献における BioGPT の利点をさらに実証し、生物医学用語の流暢な説明を生成します。* ## Usage tips - BioGPT は絶対位置埋め込みを備えたモデルであるため、通常は入力を左側ではなく右側にパディングすることをお勧めします。 - BioGPT は因果言語モデリング (CLM) 目的でトレーニングされているため、シーケンス内の次のトークンを予測するのに強力です。 run_generation.py サンプル スクリプトで確認できるように、この機能を利用すると、BioGPT は構文的に一貫したテキストを生成できます。 - モデルは、以前に計算されたキーと値のアテンション ペアである`past_key_values`(PyTorch の場合) を入力として受け取ることができます。この (past_key_values または past) 値を使用すると、モデルがテキスト生成のコンテキストで事前に計算された値を再計算できなくなります。 PyTorch の使用法の詳細については、BioGptForCausalLM.forward() メソッドの past_key_values 引数を参照してください。 このモデルは、[kamalkraj](https://huggingface.co/kamalkraj) によって提供されました。元のコードは [ここ](https://github.com/microsoft/BioGPT) にあります。 ## Documentation resources - [因果言語モデリング タスク ガイド](../tasks/language_modeling) ## BioGptConfig [[autodoc]] BioGptConfig ## BioGptTokenizer [[autodoc]] BioGptTokenizer - save_vocabulary ## BioGptModel [[autodoc]] BioGptModel - forward ## BioGptForCausalLM [[autodoc]] BioGptForCausalLM - forward ## BioGptForTokenClassification [[autodoc]] BioGptForTokenClassification - forward ## BioGptForSequenceClassification [[autodoc]] BioGptForSequenceClassification - forward
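参考として、Hub 上のチェックポイント `microsoft/biogpt` を想定した、ビームサーチによるテキスト生成の最小限のスケッチを示します (プロンプトや生成パラメータは説明用の仮の値です)。

```python
import torch
from transformers import BioGptTokenizer, BioGptForCausalLM

tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")

inputs = tokenizer("COVID-19 is", return_tensors="pt")

# 生成中は past_key_values が内部で再利用されるため、トークンごとの再計算は発生しません
with torch.no_grad():
    outputs = model.generate(**inputs, max_length=50, num_beams=5, early_stopping=True)

print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```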
transformers/docs/source/ja/model_doc/biogpt.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/biogpt.md", "repo_id": "transformers", "token_count": 1982 }
272
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # CLIPSeg ## Overview CLIPSeg モデルは、Timo Lüddecke, Alexander Ecker によって [Image Segmentation using Text and Image Prompts](https://arxiv.org/abs/2112.10003) で提案されました。 そしてアレクサンダー・エッカー。 CLIPSeg は、ゼロショットおよびワンショット画像セグメンテーションのために、凍結された [CLIP](clip) モデルの上に最小限のデコーダを追加します。 論文の要約は次のとおりです。 *画像のセグメンテーションは通常、トレーニングによって解決されます。 オブジェクト クラスの固定セットのモデル。後で追加のクラスやより複雑なクエリを組み込むとコストがかかります これらの式を含むデータセットでモデルを再トレーニングする必要があるためです。ここでシステムを提案します 任意の情報に基づいて画像セグメンテーションを生成できます。 テスト時にプロンプ​​トが表示されます。プロンプトはテキストまたは 画像。このアプローチにより、統一されたモデルを作成できます。 3 つの一般的なセグメンテーション タスクについて (1 回トレーニング済み) 参照式のセグメンテーション、ゼロショット セグメンテーション、ワンショット セグメンテーションという明確な課題が伴います。 CLIP モデルをバックボーンとして構築し、これをトランスベースのデコーダで拡張して、高密度なデータ通信を可能にします。 予測。の拡張バージョンでトレーニングした後、 PhraseCut データセット、私たちのシステムは、フリーテキスト プロンプトまたは クエリを表す追加の画像。後者の画像ベースのプロンプトのさまざまなバリエーションを詳細に分析します。 この新しいハイブリッド入力により、動的適応が可能になります。 前述の 3 つのセグメンテーション タスクのみですが、 テキストまたは画像をクエリするバイナリ セグメンテーション タスクに 定式化することができる。最後に、システムがうまく適応していることがわかりました アフォーダンスまたはプロパティを含む一般化されたクエリ* <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/clipseg_architecture.png" alt="描画" width="600"/> <small> CLIPSeg の概要。 <a href="https://arxiv.org/abs/2112.10003">元の論文から抜粋。</a> </small> このモデルは、[nielsr](https://huggingface.co/nielsr) によって提供されました。 元のコードは [ここ](https://github.com/timojl/clipseg) にあります。 ## Usage tips - [`CLIPSegForImageSegmentation`] は、[`CLIPSegModel`] の上にデコーダを追加します。後者は [`CLIPModel`] と同じです。 - [`CLIPSegForImageSegmentation`] は、テスト時に任意のプロンプトに基づいて画像セグメンテーションを生成できます。プロンプトはテキストのいずれかです (`input_ids` としてモデルに提供される) または画像 (`conditional_pixel_values` としてモデルに提供される)。カスタムを提供することもできます 条件付き埋め込み (`conditional_embeddings`としてモデルに提供されます)。 ## Resources CLIPSeg の使用を開始するのに役立つ、公式 Hugging Face およびコミュニティ (🌎 で示されている) リソースのリスト。ここに含めるリソースの送信に興味がある場合は、お気軽にプル リクエストを開いてください。審査させていただきます。リソースは、既存のリソースを複製するのではなく、何か新しいものを示すことが理想的です。 <PipelineTag pipeline="image-segmentation"/> - [CLIPSeg を使用したゼロショット画像セグメンテーション](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/CLIPSeg/Zero_shot_image_segmentation_with_CLIPSeg.ipynb) を説明するノートブック。 ## CLIPSegConfig [[autodoc]] CLIPSegConfig - from_text_vision_configs ## CLIPSegTextConfig [[autodoc]] CLIPSegTextConfig ## CLIPSegVisionConfig [[autodoc]] CLIPSegVisionConfig ## CLIPSegProcessor [[autodoc]] CLIPSegProcessor ## CLIPSegModel [[autodoc]] CLIPSegModel - forward - get_text_features - get_image_features ## CLIPSegTextModel [[autodoc]] CLIPSegTextModel - forward ## CLIPSegVisionModel [[autodoc]] CLIPSegVisionModel - forward ## CLIPSegForImageSegmentation [[autodoc]] CLIPSegForImageSegmentation - forward
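参考までに、テキストプロンプトによるゼロショット画像セグメンテーションの最小限のスケッチを以下に示します。`CIDAS/clipseg-rd64-refined` チェックポイントと画像・プロンプトは説明用の一例です:

```python
import requests
import torch
from PIL import Image
from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
prompts = ["a cat", "a remote control"]

# プロンプトごとに同じ画像を渡す
inputs = processor(text=prompts, images=[image] * len(prompts), padding=True, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# 各プロンプトに対応するセグメンテーション ロジット
print(outputs.logits.shape)
```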
transformers/docs/source/ja/model_doc/clipseg.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/clipseg.md", "repo_id": "transformers", "token_count": 2201 }
273
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Deformable DETR ## Overview 変形可能 DETR モデルは、Xizhou Zhu、Weijie Su、Lewei Lu、Bin Li、Xiaogang Wang, Jifeng Dai によって [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159) で提案されました 変形可能な DETR は、参照周囲の少数の主要なサンプリング ポイントのみに注目する新しい変形可能なアテンション モジュールを利用することにより、収束の遅さの問題と元の [DETR](detr) の制限された特徴の空間解像度を軽減します。 論文の要約は次のとおりです。 *DETR は、優れたパフォーマンスを実証しながら、物体検出における多くの手作業で設計されたコンポーネントの必要性を排除するために最近提案されました。ただし、画像特徴マップの処理における Transformer アテンション モジュールの制限により、収束が遅く、特徴の空間解像度が制限されるという問題があります。これらの問題を軽減するために、私たちは Deformable DETR を提案しました。この DETR のアテンション モジュールは、参照周囲の少数の主要なサンプリング ポイントのみに注目します。変形可能な DETR は、10 分の 1 のトレーニング エポックで、DETR よりも優れたパフォーマンス (特に小さなオブジェクトの場合) を達成できます。 COCO ベンチマークに関する広範な実験により、私たちのアプローチの有効性が実証されました。* <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/deformable_detr_architecture.png" alt="描画" width="600"/> <small> 変形可能な DETR アーキテクチャ。 <a href="https://arxiv.org/abs/2010.04159">元の論文</a>から抜粋。</small> このモデルは、[nielsr](https://huggingface.co/nielsr) によって提供されました。元のコードは [ここ](https://github.com/fundamentalvision/Deformable-DETR) にあります。 ## Usage tips - トレーニング Deformable DETR は、元の [DETR](detr) モデルをトレーニングすることと同等です。デモ ノートブックについては、以下の [resources](#resources) セクションを参照してください。 ## Resources Deformable DETR の使用を開始するのに役立つ公式 Hugging Face およびコミュニティ (🌎 で示される) リソースのリスト。 <PipelineTag pipeline="object-detection"/> - [`DeformableDetrForObjectDetection`] のカスタム データセットでの推論と微調整に関するデモ ノートブックは、[こちら](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/Deformable-DETR) にあります。 - [物体検出タスクガイド](../tasks/object_detection) も参照してください。 ここに含めるリソースの送信に興味がある場合は、お気軽にプル リクエストを開いてください。審査させていただきます。リソースは、既存のリソースを複製するのではなく、何か新しいものを示すことが理想的です。 ## DeformableDetrImageProcessor [[autodoc]] DeformableDetrImageProcessor - preprocess - post_process_object_detection ## DeformableDetrFeatureExtractor [[autodoc]] DeformableDetrFeatureExtractor - __call__ - post_process_object_detection ## DeformableDetrConfig [[autodoc]] DeformableDetrConfig ## DeformableDetrModel [[autodoc]] DeformableDetrModel - forward ## DeformableDetrForObjectDetection [[autodoc]] DeformableDetrForObjectDetection - forward
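参考までに、`SenseTime/deformable-detr` チェックポイントを用いた物体検出推論の最小限のスケッチを以下に示します。画像としきい値は説明用の一例です:

```python
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, DeformableDetrForObjectDetection

image_processor = AutoImageProcessor.from_pretrained("SenseTime/deformable-detr")
model = DeformableDetrForObjectDetection.from_pretrained("SenseTime/deformable-detr")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = image_processor(images=image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# 検出結果を元の画像サイズに後処理(しきい値は一例)
target_sizes = torch.tensor([image.size[::-1]])
results = image_processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)[0]

for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(model.config.id2label[label.item()], round(score.item(), 3), [round(v, 2) for v in box.tolist()])
```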
transformers/docs/source/ja/model_doc/deformable_detr.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/deformable_detr.md", "repo_id": "transformers", "token_count": 1792 }
274
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Efficient Inference on a Single GPU このガイドに加えて、[1つのGPUでのトレーニングガイド](perf_train_gpu_one)と[CPUでの推論ガイド](perf_infer_cpu)に関連する情報があります。 ## Flash Attention 2 <Tip> この機能は実験的であり、将来のバージョンで大幅に変更される可能性があります。たとえば、Flash Attention 2 APIは近い将来`BetterTransformer` APIに移行するかもしれません。 </Tip> Flash Attention 2は、トランスフォーマーベースのモデルのトレーニングと推論速度を大幅に高速化できます。Flash Attention 2は、Tri Dao氏によって[公式のFlash Attentionリポジトリ](https://github.com/Dao-AILab/flash-attention)で導入されました。Flash Attentionに関する科学論文は[こちら](https://arxiv.org/abs/2205.14135)で見ることができます。 Flash Attention 2を正しくインストールするには、上記のリポジトリに記載されているインストールガイドに従ってください。 以下のモデルに対してFlash Attention 2をネイティブサポートしています: - Llama - Falcon さらに多くのモデルにFlash Attention 2のサポートを追加することをGitHubで提案することもでき、変更を統合するためにプルリクエストを開くこともできます。サポートされているモデルは、パディングトークンを使用してトレーニングを含む、推論とトレーニングに使用できます(現在の`BetterTransformer` APIではサポートされていない)。 <Tip> Flash Attention 2は、モデルのdtypeが`fp16`または`bf16`の場合にのみ使用でき、NVIDIA-GPUデバイスでのみ実行されます。この機能を使用する前に、モデルを適切なdtypeにキャストし、サポートされているデバイスにロードしてください。 </Tip> ### Quick usage モデルでFlash Attention 2を有効にするには、`from_pretrained`の引数に`attn_implementation="flash_attention_2"`を追加します。 ```python import torch from transformers import AutoModelForCausalLM, AutoTokenizer, LlamaForCausalLM model_id = "tiiuae/falcon-7b" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained( model_id, torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2", ) ``` こちらは、生成または微調整のために使用するテキストです。 ### Expected speedups 特に長いシーケンスに対して、微調整と推論の際には、かなりの高速化が期待できます。ただし、Flash Attentionはパディングトークンを使用してアテンションスコアを計算しないため、シーケンスにパディングトークンが含まれる場合、バッチ推論においてアテンションスコアを手動でパッド/アンパッドする必要があり、パディングトークンを含むバッチ生成の大幅な遅延が発生します。 これを克服するために、トレーニング中にシーケンスにパディングトークンを使用せずにFlash Attentionを使用する必要があります(たとえば、データセットをパックすることにより、シーケンスを最大シーケンス長に達するまで連結することなど)。ここに[例](https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_clm.py#L516)が提供されています。 以下は、パディングトークンのない場合に、シーケンス長が4096の[tiiuae/falcon-7b](https://hf.co/tiiuae/falcon-7b)に対する単純なフォワードパスの予想される高速化です。さまざまなバッチサイズが示されています: <div style="text-align: center"> <img src="https://huggingface.co/datasets/ybelkada/documentation-images/resolve/main/falcon-7b-inference-large-seqlen.png"> </div> 以下は、パディングトークンのない場合に、シーケンス長が4096の[`meta-llama/Llama-7b-hf`](https://hf.co/meta-llama/Llama-7b-hf)に対する単純なフォワードパスの予想される高速化です。さまざまなバッチサイズが示されています: <div style="text-align: center"> <img src="https://huggingface.co/datasets/ybelkada/documentation-images/resolve/main/llama-7b-inference-large-seqlen.png"> </div> パディングトークンを含むシーケンス(パディングトークンを使用してトレーニングまたは生成する)の場合、アテンションスコアを正しく計算するために入力シーケンスをアンパッド/パッドする必要があります。比較的小さいシーケンス長の場合、純粋なフォワードパスではパディングトークンが30%未満しか埋められていないため、これはわずかな高速化をもたらします。 <div style="text-align: center"> <img src="https://huggingface.co/datasets/ybelkada/documentation-images/resolve/main/llama-2-small-seqlen-padding.png"> </div> しかし、大きなシーケンス長の場合、純粋な推論(トレーニングも含む)には興味深い高速化が得られます。 Flash 
Attentionは、アテンション計算をよりメモリ効率の良いものにし、大きなシーケンス長でのCUDA OOMの問題を回避できるようにします。大きなシーケンス長に対して最大20のメモリ削減をもたらすことがあります。詳細については、[公式のFlash Attentionリポジトリ](https://github.com/Dao-AILab/flash-attention)をご覧ください。 <div style="text-align: center"> <img src="https://huggingface.co/datasets/ybelkada/documentation-images/resolve/main/llama-2-large-seqlen-padding.png"> </div> ### Advanced usage この機能をモデルの最適化に多くの既存の機能と組み合わせることができます。以下にいくつかの例を示します: ### Combining Flash Attention 2 and 8-bit models この機能を8ビットの量子化と組み合わせることができます: ```python import torch from transformers import AutoModelForCausalLM, AutoTokenizer, LlamaForCausalLM model_id = "tiiuae/falcon-7b" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained( model_id, load_in_8bit=True, attn_implementation="flash_attention_2", ) ``` ### Combining Flash Attention 2 and 4-bit models この機能を 4 ビットの量子化と組み合わせることができます: ```python import torch from transformers import AutoModelForCausalLM, AutoTokenizer, LlamaForCausalLM model_id = "tiiuae/falcon-7b" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained( model_id, load_in_4bit=True, attn_implementation="flash_attention_2", ) ``` ### Combining Flash Attention 2 and PEFT この機能を使用して、Flash Attention 2をベースにアダプターをトレーニングする際にPEFTを組み合わせることができます。 ```python import torch from transformers import AutoModelForCausalLM, AutoTokenizer, LlamaForCausalLM from peft import LoraConfig model_id = "tiiuae/falcon-7b" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained( model_id, load_in_4bit=True, attn_implementation="flash_attention_2", ) lora_config = LoraConfig( r=8, task_type="CAUSAL_LM" ) model.add_adapter(lora_config) ... # train your model ``` ## BetterTransformer [BetterTransformer](https://huggingface.co/docs/optimum/bettertransformer/overview)は、🤗 TransformersモデルをPyTorchネイティブの高速パス実行に変換します。これにより、Flash Attentionなどの最適化されたカーネルが内部で呼び出されます。 BetterTransformerは、テキスト、画像、およびオーディオモデルの単一およびマルチGPUでの高速な推論をサポートしています。 <Tip> Flash Attentionは、fp16またはbf16のdtypeを使用するモデルにのみ使用できます。BetterTransformerを使用する前に、モデルを適切なdtypeにキャストしてください。 </Tip> ### Encoder models PyTorchネイティブの[`nn.MultiHeadAttention`](https://pytorch.org/blog/a-better-transformer-for-fast-transformer-encoder-inference/)アテンション高速パス、BetterTransformerと呼ばれるものは、[🤗 Optimumライブラリ](https://huggingface.co/docs/optimum/bettertransformer/overview)の統合を通じてTransformersと一緒に使用できます。 PyTorchのアテンション高速パスを使用すると、カーネルフュージョンと[ネストされたテンソル](https://pytorch.org/docs/stable/nested.html)の使用により、推論を高速化できます。詳細なベンチマーク情報は[このブログ記事](https://medium.com/pytorch/bettertransformer-out-of-the-box-performance-for-huggingface-transformers-3fbe27d50ab2)にあります。 [`optimum`](https://github.com/huggingface/optimum)パッケージをインストールした後、推論中にBetter Transformerを使用するには、関連する内部モジュールを呼び出すことで置き換える必要があります[`~PreTrainedModel.to_bettertransformer`]: ```python model = model.to_bettertransformer() ``` メソッド [`~PreTrainedModel.reverse_bettertransformer`] は、モデルを保存する前に使用すべきで、標準のトランスフォーマーモデリングを使用するためのものです: ```python model = model.reverse_bettertransformer() model.save_pretrained("saved_model") ``` BetterTransformer APIを使ったエンコーダーモデルの可能性について詳しく知るには、[このブログポスト](https://medium.com/pytorch/bettertransformer-out-of-the-box-performance-for-huggingface-transformers-3fbe27d50ab2)をご覧ください。 ### Decoder models テキストモデル、特にデコーダーベースのモデル(GPT、T5、Llamaなど)にとって、BetterTransformer APIはすべての注意操作を[`torch.nn.functional.scaled_dot_product_attention`オペレーター](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention)(SDPA)を使用するように変換します。このオペレーターはPyTorch 
2.0以降でのみ利用可能です。 モデルをBetterTransformerに変換するには、以下の手順を実行してください: ```python from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m") # convert the model to BetterTransformer model.to_bettertransformer() # Use it for training or inference ``` SDPAは、ハードウェアや問題のサイズに応じて[Flash Attention](https://arxiv.org/abs/2205.14135)カーネルを使用することもできます。Flash Attentionを有効にするか、特定の設定(ハードウェア、問題サイズ)で使用可能かどうかを確認するには、[`torch.backends.cuda.sdp_kernel`](https://pytorch.org/docs/master/backends.html#torch.backends.cuda.sdp_kernel)をコンテキストマネージャとして使用します。 ```diff import torch from transformers import AutoModelForCausalLM, AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", torch_dtype=torch.float16).to("cuda") # convert the model to BetterTransformer model.to_bettertransformer() input_text = "Hello my dog is cute and" inputs = tokenizer(input_text, return_tensors="pt").to("cuda") + with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False): outputs = model.generate(**inputs) print(tokenizer.decode(outputs[0], skip_special_tokens=True)) ``` もしトレースバックにバグが表示された場合 ```bash RuntimeError: No available kernel. Aborting execution. ``` Flash Attention の広範なカバレッジを持つかもしれない PyTorch のナイトリーバージョンを試してみることをお勧めします。 ```bash pip3 install -U --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu118 ``` Or make sure your model is correctly casted in float16 or bfloat16 モデルが正しくfloat16またはbfloat16にキャストされていることを確認してください。 Have a look at [this detailed blogpost](https://pytorch.org/blog/out-of-the-box-acceleration/) to read more about what is possible to do with `BetterTransformer` + SDPA API. `BetterTransformer` + SDPA APIを使用して何が可能かについて詳しく読むには、[この詳細なブログポスト](https://pytorch.org/blog/out-of-the-box-acceleration/)をご覧ください。 ## `bitsandbytes` integration for FP4 mixed-precision inference FP4混合精度推論のための`bitsandbytes`統合 You can install `bitsandbytes` and benefit from easy model compression on GPUs. Using FP4 quantization you can expect to reduce up to 8x the model size compared to its native full precision version. Check out below how to get started. `bitsandbytes`をインストールし、GPUで簡単なモデルの圧縮を利用できます。FP4量子化を使用すると、ネイティブのフルプレシジョンバージョンと比較してモデルサイズを最大8倍削減できることが期待できます。以下を確認して、どのように始めるかをご覧ください。 <Tip> Note that this feature can also be used in a multi GPU setup. 
この機能は、マルチGPUセットアップでも使用できることに注意してください。 </Tip> ### Requirements [[requirements-for-fp4-mixedprecision-inference]] - Latest `bitsandbytes` library `pip install bitsandbytes>=0.39.0` - Install latest `accelerate` from source `pip install git+https://github.com/huggingface/accelerate.git` - Install latest `transformers` from source `pip install git+https://github.com/huggingface/transformers.git` ### Running FP4 models - single GPU setup - Quickstart 以下のコードを実行することで、簡単に単一のGPUでFP4モデルを実行できます: ```py from transformers import AutoModelForCausalLM model_name = "bigscience/bloom-2b5" model_4bit = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", load_in_4bit=True) ``` 注意: `device_map`はオプションですが、推論時に `device_map = 'auto'` を設定することが推奨されています。これにより、利用可能なリソースに効率的にモデルがディスパッチされます。 ### Running FP4 models - multi GPU setup 混合4ビットモデルを複数のGPUにロードする方法は、単一GPUセットアップと同じです(単一GPUセットアップと同じコマンドです): ```py model_name = "bigscience/bloom-2b5" model_4bit = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", load_in_4bit=True) ``` しかし、`accelerate`を使用して、各GPUに割り当てるGPU RAMを制御することができます。以下のように、`max_memory`引数を使用します: ```py max_memory_mapping = {0: "600MB", 1: "1GB"} model_name = "bigscience/bloom-3b" model_4bit = AutoModelForCausalLM.from_pretrained( model_name, device_map="auto", load_in_4bit=True, max_memory=max_memory_mapping ) ``` この例では、最初のGPUは600MBのメモリを使用し、2番目のGPUは1GBを使用します。 ### Advanced usage このメソッドのさらなる高度な使用法については、[量子化](main_classes/quantization)のドキュメンテーションページをご覧ください。 ## `bitsandbytes` integration for Int8 mixed-precision matrix decomposition <Tip> この機能は、マルチGPU環境でも使用できます。 </Tip> 論文[`LLM.int8():スケーラブルなTransformer向けの8ビット行列乗算`](https://arxiv.org/abs/2208.07339)によれば、Hugging Face統合がHub内のすべてのモデルでわずか数行のコードでサポートされています。このメソッドは、半精度(`float16`および`bfloat16`)の重みの場合に`nn.Linear`サイズを2倍、単精度(`float32`)の重みの場合は4倍に縮小し、外れ値に対してほとんど影響を与えません。 ![HFxbitsandbytes.png](https://cdn-uploads.huggingface.co/production/uploads/1659861207959-62441d1d9fdefb55a0b7d12c.png) Int8混合精度行列分解は、行列乗算を2つのストリームに分割することによって動作します:(1) システマティックな特徴外れ値ストリームがfp16で行列乗算(0.01%)、(2) int8行列乗算の通常のストリーム(99.9%)。この方法を使用すると、非常に大きなモデルに対して予測の劣化なしにint8推論が可能です。 このメソッドの詳細については、[論文](https://arxiv.org/abs/2208.07339)または[この統合に関するブログ記事](https://huggingface.co/blog/hf-bitsandbytes-integration)をご確認ください。 ![MixedInt8.gif](https://cdn-uploads.huggingface.co/production/uploads/1660567469965-62441d1d9fdefb55a0b7d12c.gif) なお、この機能を使用するにはGPUが必要であり、カーネルはGPU専用にコンパイルされている必要があります。この機能を使用する前に、モデルの1/4(またはハーフ精度の重みの場合は1/2)を保存するのに十分なGPUメモリがあることを確認してください。 このモジュールを使用する際のヘルプに関する詳細は、以下のノートをご覧いただくか、[Google Colabのデモ](#colab-demos)をご覧ください。 ### Requirements [[requirements-for-int8-mixedprecision-matrix-decomposition]] - `bitsandbytes<0.37.0`を使用する場合、NVIDIA GPUを使用していることを確認し、8ビットテンソルコアをサポートしていることを確認してください(Turing、Ampere、またはそれ以降のアーキテクチャー、例:T4、RTX20s RTX30s、A40-A100など)。`bitsandbytes>=0.37.0`の場合、すべてのGPUがサポートされるはずです。 - 正しいバージョンの`bitsandbytes`をインストールするには、次のコマンドを実行してください: `pip install bitsandbytes>=0.31.5` - `accelerate`をインストールします: `pip install accelerate>=0.12.0` ### Running mixed-Int8 models - single GPU setup 必要なライブラリをインストールした後、ミックス 8 ビットモデルを読み込む方法は次の通りです: ```py from transformers import AutoModelForCausalLM model_name = "bigscience/bloom-2b5" model_8bit = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", load_in_8bit=True) ``` 以下はシンプルな例です: * `pipeline()` 関数の代わりに、モデルの `generate()` メソッドを使用することをお勧めします。`pipeline()` 関数を使用して推論することは可能ですが、混合8ビットモデルに最適化されておらず、`generate()` メソッドを使用するよりも遅くなります。また、一部のサンプリング戦略(例:ヌクレウスサンプリング)は、`pipeline()` 関数では混合8ビットモデルではサポートされていません。 * すべての入力をモデルと同じデバイスに配置してください。 
```py
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "bigscience/bloom-2b5"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model_8bit = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", load_in_8bit=True)

prompt = "Hello, my llama is cute"
inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
generated_ids = model_8bit.generate(**inputs)
outputs = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
```

### Running mixed-int8 models - multi GPU setup

複数のGPUに混合8ビットモデルをロードする方法は、次の通りです(シングルGPUセットアップと同じコマンドです):

```py
model_name = "bigscience/bloom-2b5"
model_8bit = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", load_in_8bit=True)
```

`accelerate`を使用して各GPUに割り当てるGPU RAMを制御する際には、以下のように`max_memory`引数を使用します:

```py
max_memory_mapping = {0: "1GB", 1: "2GB"}
model_name = "bigscience/bloom-3b"
model_8bit = AutoModelForCausalLM.from_pretrained(
    model_name, device_map="auto", load_in_8bit=True, max_memory=max_memory_mapping
)
```

この例では、最初のGPUは1GBのメモリを使用し、2番目のGPUは2GBを使用します。

### Colab demos

この方法を使用すると、以前のGoogle Colabでは推論できなかったモデルに対して推論を行うことができます。以下は、Google Colabで8ビット量子化を使用してT5-11b(fp32で42GB)を実行するデモのリンクです:

[![Open In Colab: T5-11b demo](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1YORPWx4okIHXnjW7MSAidXN29mPVNT7F?usp=sharing)

また、BLOOM-3Bのデモもご覧いただけます:

[![Open In Colab: BLOOM-3b demo](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1qOjXfQIAULfKvZqwCen8-MoWKGdSatZ4?usp=sharing)

## Advanced usage: mixing FP4 (or Int8) and BetterTransformer

異なる方法を組み合わせて、モデルの最適なパフォーマンスを得ることができます。例えば、BetterTransformerを使用してFP4ミックスプレシジョン推論とフラッシュアテンションを組み合わせることができます。

```py
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16
)

tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", quantization_config=quantization_config)

input_text = "Hello my dog is cute and"
inputs = tokenizer(input_text, return_tensors="pt").to("cuda")

with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
    outputs = model.generate(**inputs)

print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```
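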
transformers/docs/source/ja/perf_infer_gpu_one.md/0
{ "file_path": "transformers/docs/source/ja/perf_infer_gpu_one.md", "repo_id": "transformers", "token_count": 9331 }
275
<!-- Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ このファイルはMarkdown形式ですが、特定のMDXに類似したドキュメントビルダーの構文を含んでおり、 Markdownビューアーで正しく表示されないことがあります。 --> # Preprocess [[open-in-colab]] データセットでモデルをトレーニングする前に、それをモデルの期待する入力形式に前処理する必要があります。 データがテキスト、画像、またはオーディオであるかどうかにかかわらず、それらはテンソルのバッチに変換して組み立てる必要があります。 🤗 Transformersは、データをモデル用に準備するのに役立つ前処理クラスのセットを提供しています。 このチュートリアルでは、次のことを学びます: * テキストの場合、[Tokenizer](./main_classes/tokenizer)を使用してテキストをトークンのシーケンスに変換し、トークンの数値表現を作成し、それらをテンソルに組み立てる方法。 * 音声とオーディオの場合、[Feature extractor](./main_classes/feature_extractor)を使用してオーディオ波形から連続的な特徴を抽出し、それらをテンソルに変換する方法。 * 画像入力の場合、[ImageProcessor](./main_classes/image)を使用して画像をテンソルに変換する方法。 * マルチモーダル入力の場合、[Processor](./main_classes/processors)を使用してトークナイザと特徴抽出器または画像プロセッサを組み合わせる方法。 <Tip> `AutoProcessor`は常に動作し、使用するモデルに適切なクラスを自動的に選択します。 トークナイザ、画像プロセッサ、特徴抽出器、またはプロセッサを使用しているかにかかわらず、動作します。 </Tip> 始める前に、🤗 Datasetsをインストールして、いくつかのデータセットを試すことができるようにしてください: ```bash pip install datasets ``` ## Natural Language Processing <Youtube id="Yffk5aydLzg"/> テキストデータの前処理に使用する主要なツールは、[トークナイザ](main_classes/tokenizer)です。トークナイザは、一連のルールに従ってテキストを*トークン*に分割します。トークンは数値に変換され、その後テンソルに変換され、モデルの入力となります。モデルが必要とする追加の入力は、トークナイザによって追加されます。 <Tip> 事前学習済みモデルを使用する予定の場合、関連する事前学習済みトークナイザを使用することが重要です。これにより、テキストが事前学習コーパスと同じ方法で分割され、事前学習中に通常*ボキャブ*として参照される対応するトークンインデックスを使用します。 </Tip> [`AutoTokenizer.from_pretrained`]メソッドを使用して事前学習済みトークナイザをロードして、開始しましょう。これにより、モデルが事前学習された*ボキャブ*がダウンロードされます: ```python >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased") ``` 次に、テキストをトークナイザに渡します: ```py >>> encoded_input = tokenizer("Do not meddle in the affairs of wizards, for they are subtle and quick to anger.") >>> print(encoded_input) {'input_ids': [101, 2079, 2025, 19960, 10362, 1999, 1996, 3821, 1997, 16657, 1010, 2005, 2027, 2024, 11259, 1998, 4248, 2000, 4963, 1012, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} ``` トークナイザは、重要な3つの項目を持つ辞書を返します: * [input_ids](glossary#input-ids) は文中の各トークンに対応するインデックスです。 * [attention_mask](glossary#attention-mask) はトークンがアテンションを受ける必要があるかどうかを示します。 * [token_type_ids](glossary#token-type-ids) は複数のシーケンスがある場合、トークンがどのシーケンスに属しているかを識別します。 `input_ids` をデコードして入力を返します: ```python >>> tokenizer.decode(encoded_input["input_ids"]) '[CLS] 魔法使いの事に干渉するな、彼らは微妙で怒りっぽい。 [SEP]' ``` 如何にお分かりいただけるかと思いますが、トークナイザはこの文章に2つの特別なトークン、`CLS`(クラシファイア)と`SEP`(セパレータ)を追加しました。 すべてのモデルが特別なトークンを必要とするわけではありませんが、必要な場合、トークナイザは自動的にそれらを追加します。 複数の文章を前処理する場合、トークナイザにリストとして渡してください: ```py >>> batch_sentences = [ ... "But what about second breakfast?", ... "Don't think he knows about second breakfast, Pip.", ... "What about elevensies?", ... 
] >>> encoded_inputs = tokenizer(batch_sentences) >>> print(encoded_inputs) {'input_ids': [[101, 1252, 1184, 1164, 1248, 6462, 136, 102], [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], [101, 1327, 1164, 5450, 23434, 136, 102]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1]]} ``` ### Pad 文章は常に同じ長さではないことがあり、これはテンソル(モデルの入力)が均一な形状を持つ必要があるため問題となります。 パディングは、短い文に特別な「パディングトークン」を追加して、テンソルを長いシーケンスに合わせるための戦略です。 バッチ内の短いシーケンスを最長のシーケンスに合わせるために、`padding`パラメータを`True`に設定します: ```py >>> batch_sentences = [ ... "But what about second breakfast?", ... "Don't think he knows about second breakfast, Pip.", ... "What about elevensies?", ... ] >>> encoded_input = tokenizer(batch_sentences, padding=True) >>> print(encoded_input) {'input_ids': [[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0], [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]]} ``` 1番目と3番目の文は、短いために`0`でパディングされています。 ### Truncation 逆のスペクトルでは、時折、モデルが処理するのに長すぎるシーケンスがあるかもしれません。この場合、シーケンスを短縮する必要があります。 モデルが受け入れる最大の長さにシーケンスを切り詰めるには、`truncation`パラメータを`True`に設定します: ```py >>> batch_sentences = [ ... "But what about second breakfast?", ... "Don't think he knows about second breakfast, Pip.", ... "What about elevensies?", ... ] >>> encoded_input = tokenizer(batch_sentences, padding=True, truncation=True) >>> print(encoded_input) {'input_ids': [[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0], [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]]} ``` <Tip> 異なるパディングと切り詰めの引数について詳しくは、[パディングと切り詰め](./pad_truncation)のコンセプトガイドをご覧ください。 </Tip> ### Build tensors 最後に、トークナイザがモデルに供給される実際のテンソルを返すように設定します。 `return_tensors`パラメータを`pt`(PyTorch用)または`tf`(TensorFlow用)に設定します: <frameworkcontent> <pt> ```py >>> batch_sentences = [ ... "But what about second breakfast?", ... "Don't think he knows about second breakfast, Pip.", ... "What about elevensies?", ... 
] >>> encoded_input = tokenizer(batch_sentences, padding=True, truncation=True, return_tensors="pt") >>> print(encoded_input) {'input_ids': tensor([[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0], [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]]), 'token_type_ids': tensor([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]])} ``` </pt> <tf> ```py >>> batch_sentences = [ ... "But what about second breakfast?", ... "Don't think he knows about second breakfast, Pip.", ... "What about elevensies?", ... ] >>> encoded_input = tokenizer(batch_sentences, padding=True, truncation=True, return_tensors="tf") >>> print(encoded_input) {'input_ids': <tf.Tensor: shape=(2, 9), dtype=int32, numpy= array([[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0], [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=int32)>, 'token_type_ids': <tf.Tensor: shape=(2, 9), dtype=int32, numpy= array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=int32)>, 'attention_mask': <tf.Tensor: shape=(2, 9), dtype=int32, numpy= array([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=int32)>} ``` </tf> </frameworkcontent> ## Audio オーディオタスクの場合、データセットをモデル用に準備するために[特徴抽出器](main_classes/feature_extractor)が必要です。 特徴抽出器は生のオーディオデータから特徴を抽出し、それらをテンソルに変換するために設計されています。 [PolyAI/minds14](https://huggingface.co/datasets/PolyAI/minds14)データセットをロードして(データセットのロード方法の詳細については🤗 [Datasetsチュートリアル](https://huggingface.co/docs/datasets/load_hub)を参照)、 オーディオデータセットで特徴抽出器をどのように使用できるかを確認してみましょう: ```python >>> from datasets import load_dataset, Audio >>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train") ``` アクセスして`audio`列の最初の要素を確認します。`audio`列を呼び出すと、自動的にオーディオファイルが読み込まれ、リサンプリングされます: ```py >>> dataset[0]["audio"] {'array': array([ 0. , 0.00024414, -0.00024414, ..., -0.00024414, 0. , 0. ], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav', 'sampling_rate': 8000} ``` これにより、3つのアイテムが返されます: * `array` は読み込まれた音声信号で、1Dの配列として読み込まれます。必要に応じてリサンプリングされることもあります。 * `path` は音声ファイルの場所を指します。 * `sampling_rate` は音声信号内のデータポイントが1秒間にいくつ測定されるかを示します。 このチュートリアルでは、[Wav2Vec2](https://huggingface.co/facebook/wav2vec2-base)モデルを使用します。 モデルカードを確認すると、Wav2Vec2が16kHzのサンプリングされた音声オーディオで事前学習されていることがわかります。 モデルの事前学習に使用されたデータセットのサンプリングレートと、あなたのオーディオデータのサンプリングレートが一致することが重要です。 データのサンプリングレートが異なる場合、データをリサンプリングする必要があります。 1. 🤗 Datasetsの [`~datasets.Dataset.cast_column`] メソッドを使用して、サンプリングレートを16kHzにアップサンプリングします: ```py >>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16_000)) ``` 2. 
再び `audio` 列を呼び出してオーディオファイルをリサンプルします: ```py >>> dataset[0]["audio"] {'array': array([ 2.3443763e-05, 2.1729663e-04, 2.2145823e-04, ..., 3.8356509e-05, -7.3497440e-06, -2.1754686e-05], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav', 'sampling_rate': 16000} ``` 次に、入力を正規化しパディングするために特徴抽出器をロードします。テキストデータをパディングする場合、短いシーケンスには `0` が追加されます。同じ考え方がオーディオデータにも適用されます。特徴抽出器は `array` に `0` を追加します(これは無音として解釈されます)。 [`AutoFeatureExtractor.from_pretrained`]を使用して特徴抽出器をロードします: ```python >>> from transformers import AutoFeatureExtractor >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base") ``` オーディオ `array` を特徴抽出器に渡します。特徴抽出器で発生する可能性のある無音エラーをより良くデバッグするために、特徴抽出器に `sampling_rate` 引数を追加することをお勧めします。 ```python >>> audio_input = [dataset[0]["audio"]["array"]] >>> feature_extractor(audio_input, sampling_rate=16000) {'input_values': [array([ 3.8106556e-04, 2.7506407e-03, 2.8015103e-03, ..., 5.6335266e-04, 4.6588284e-06, -1.7142107e-04], dtype=float32)]} ``` 同様に、トークナイザと同様に、バッチ内の可変シーケンスを処理するためにパディングまたは切り詰めを適用できます。次に、これらの2つのオーディオサンプルのシーケンス長を確認してみましょう: ```python >>> dataset[0]["audio"]["array"].shape (173398,) >>> dataset[1]["audio"]["array"].shape (106496,) ``` この関数は、データセットを前処理してオーディオサンプルの長さを同じにするためのものです。最大サンプル長を指定し、特徴抽出器はシーケンスをそれに合わせてパディングまたは切り詰めます。 ```py >>> def preprocess_function(examples): ... audio_arrays = [x["array"] for x in examples["audio"]] ... inputs = feature_extractor( ... audio_arrays, ... sampling_rate=16000, ... padding=True, ... max_length=100000, ... truncation=True, ... ) ... return inputs ``` `preprocess_function`をデータセットの最初の数例に適用します: ```python >>> processed_dataset = preprocess_function(dataset[:5]) ``` サンプルの長さは現在同じで、指定された最大長と一致しています。これで処理されたデータセットをモデルに渡すことができます! ```py >>> processed_dataset["input_values"][0].shape (100000,) >>> processed_dataset["input_values"][1].shape (100000,) ``` ## Computer Vision コンピュータビジョンタスクでは、モデル用にデータセットを準備するための[画像プロセッサ](main_classes/image_processor)が必要です。 画像の前処理には、画像をモデルが期待する入力形式に変換するためのいくつかのステップが含まれています。これらのステップには、リサイズ、正規化、カラーチャネルの補正、および画像をテンソルに変換するなどが含まれます。 <Tip> 画像の前処理は、通常、画像の増強の形式に従います。画像の前処理と画像の増強の両方は画像データを変換しますが、異なる目的があります: * 画像の増強は、過学習を防ぎ、モデルの堅牢性を向上させるのに役立つ方法で画像を変更します。データを増強する方法は無限で、明るさや色の調整、クロップ、回転、リサイズ、ズームなど、様々な方法があります。ただし、増強操作によって画像の意味が変わらないように注意する必要があります。 * 画像の前処理は、画像がモデルの期待する入力形式と一致することを保証します。コンピュータビジョンモデルをファインチューニングする場合、画像はモデルが最初にトレーニングされたときとまったく同じ方法で前処理する必要があります。 画像の増強には任意のライブラリを使用できます。画像の前処理には、モデルに関連付けられた`ImageProcessor`を使用します。 </Tip> コンピュータビジョンのデータセットで画像プロセッサを使用する方法を示すために、[food101](https://huggingface.co/datasets/food101)データセットをロードします(データセットのロード方法の詳細については🤗[Datasetsチュートリアル](https://huggingface.co/docs/datasets/load_hub)を参照): <Tip> データセットがかなり大きいため、🤗 Datasetsの`split`パラメータを使用してトレーニングデータの小さなサンプルのみをロードします! </Tip> ```python >>> from datasets import load_dataset >>> dataset = load_dataset("food101", split="train[:100]") ``` 次に、🤗 Datasetsの [`Image`](https://huggingface.co/docs/datasets/package_reference/main_classes?highlight=image#datasets.Image) 機能で画像を見てみましょう: ```python >>> dataset[0]["image"] ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/vision-preprocess-tutorial.png"/> </div> AutoImageProcessorを[`AutoImageProcessor.from_pretrained`]を使用してロードします: ```py >>> from transformers import AutoImageProcessor >>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224") ``` 1. 
まず、画像の拡張を追加しましょう。好きなライブラリを使用できますが、このチュートリアルではtorchvisionの[`transforms`](https://pytorch.org/vision/stable/transforms.html)モジュールを使用します。別のデータ拡張ライブラリを使用したい場合は、[Albumentations](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification_albumentations.ipynb)または[Kornia notebooks](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification_kornia.ipynb)で詳細を学ぶことができます。 ここでは、[`Compose`](https://pytorch.org/vision/master/generated/torchvision.transforms.Compose.html)を使用していくつかの変換を連鎖させます - [`RandomResizedCrop`](https://pytorch.org/vision/main/generated/torchvision.transforms.RandomResizedCrop.html)と[`ColorJitter`](https://pytorch.org/vision/main/generated/torchvision.transforms.ColorJitter.html)。 サイズの変更に関しては、`image_processor`から画像サイズの要件を取得できます。 一部のモデルでは、正確な高さと幅が必要ですが、他のモデルでは`shortest_edge`のみが定義されています。 ```py >>> from torchvision.transforms import RandomResizedCrop, ColorJitter, Compose >>> size = ( ... image_processor.size["shortest_edge"] ... if "shortest_edge" in image_processor.size ... else (image_processor.size["height"], image_processor.size["width"]) ... ) >>> _transforms = Compose([RandomResizedCrop(size), ColorJitter(brightness=0.5, hue=0.5)]) ``` 2. モデルは[`pixel_values`](model_doc/visionencoderdecoder#transformers.VisionEncoderDecoderModel.forward.pixel_values)を入力として受け取ります。 `ImageProcessor`は画像の正規化と適切なテンソルの生成を処理できます。 一連の画像に対する画像拡張と画像前処理を組み合わせ、`pixel_values`を生成する関数を作成します: ```python >>> def transforms(examples): ... images = [_transforms(img.convert("RGB")) for img in examples["image"]] ... examples["pixel_values"] = image_processor(images, do_resize=False, return_tensors="pt")["pixel_values"] ... return examples ``` <Tip> 上記の例では、画像のサイズ変更を既に画像増強変換で行っているため、`do_resize=False`を設定しました。 適切な `image_processor` からの `size` 属性を活用しています。画像増強中に画像のサイズ変更を行わない場合は、このパラメータを省略してください。 デフォルトでは、`ImageProcessor` がサイズ変更を処理します。 画像を増強変換の一部として正規化したい場合は、`image_processor.image_mean` と `image_processor.image_std` の値を使用してください。 </Tip> 3. 次に、🤗 Datasetsの[`set_transform`](https://huggingface.co/docs/datasets/process#format-transform)を使用して、変換をリアルタイムで適用します: ```python >>> dataset.set_transform(transforms) ``` 4. 画像にアクセスすると、画像プロセッサが `pixel_values` を追加したことがわかります。これで処理済みのデータセットをモデルに渡すことができます! ```python >>> dataset[0].keys() ``` 以下は、変換が適用された後の画像の外観です。 画像はランダムに切り抜かれ、その色の特性も異なります。 ```py >>> import numpy as np >>> import matplotlib.pyplot as plt >>> img = dataset[0]["pixel_values"] >>> plt.imshow(img.permute(1, 2, 0)) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/preprocessed_image.png"/> </div> <Tip> オブジェクト検出、意味セグメンテーション、インスタンスセグメンテーション、およびパノプティックセグメンテーションなどのタスクの場合、`ImageProcessor`は ポスト処理メソッドを提供します。これらのメソッドは、モデルの生の出力を境界ボックスやセグメンテーションマップなどの意味のある予測に変換します。 </Tip> ### Pad 一部の場合、たとえば、[DETR](./model_doc/detr)をファインチューニングする場合、モデルはトレーニング時にスケールの変更を適用します。 これにより、バッチ内の画像のサイズが異なる場合があります。[`DetrImageProcessor`]から[`DetrImageProcessor.pad`]を使用し、 カスタムの`collate_fn`を定義して画像を一緒にバッチ処理できます。 ```py >>> def collate_fn(batch): ... pixel_values = [item["pixel_values"] for item in batch] ... encoding = image_processor.pad(pixel_values, return_tensors="pt") ... labels = [item["labels"] for item in batch] ... batch = {} ... batch["pixel_values"] = encoding["pixel_values"] ... batch["pixel_mask"] = encoding["pixel_mask"] ... batch["labels"] = labels ... 
return batch ``` ## Multi Modal マルチモーダル入力を使用するタスクの場合、モデル用にデータセットを準備するための[プロセッサ](main_classes/processors)が必要です。プロセッサは、トークナイザや特徴量抽出器などの2つの処理オブジェクトを結合します。 自動音声認識(ASR)のためのプロセッサの使用方法を示すために、[LJ Speech](https://huggingface.co/datasets/lj_speech)データセットをロードします(データセットのロード方法の詳細については🤗 [Datasets チュートリアル](https://huggingface.co/docs/datasets/load_hub)を参照): ```python >>> from datasets import load_dataset >>> lj_speech = load_dataset("lj_speech", split="train") ``` ASR(自動音声認識)の場合、主に `audio` と `text` に焦点を当てているため、他の列を削除できます: ```python >>> lj_speech = lj_speech.map(remove_columns=["file", "id", "normalized_text"]) ``` 次に、`audio`と`text`の列を見てみましょう: ```python >>> lj_speech[0]["audio"] {'array': array([-7.3242188e-04, -7.6293945e-04, -6.4086914e-04, ..., 7.3242188e-04, 2.1362305e-04, 6.1035156e-05], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/917ece08c95cf0c4115e45294e3cd0dee724a1165b7fc11798369308a465bd26/LJSpeech-1.1/wavs/LJ001-0001.wav', 'sampling_rate': 22050} >>> lj_speech[0]["text"] 'Printing, in the only sense with which we are at present concerned, differs from most if not from all the arts and crafts represented in the Exhibition' ``` 常に、オーディオデータセットのサンプリングレートを、モデルの事前学習に使用されたデータセットのサンプリングレートと一致させるように[リサンプル](preprocessing#audio)する必要があります! ```py >>> lj_speech = lj_speech.cast_column("audio", Audio(sampling_rate=16_000)) ``` プロセッサを [`AutoProcessor.from_pretrained`] を使用してロードします: ```py >>> from transformers import AutoProcessor >>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h") ``` 1. `array`内に含まれるオーディオデータを`input_values`に処理し、`text`を`labels`にトークン化する関数を作成します: ```py >>> def prepare_dataset(example): ... audio = example["audio"] ... example.update(processor(audio=audio["array"], text=example["text"], sampling_rate=16000)) ... return example ``` 2. サンプルに`prepare_dataset`関数を適用します: ```py >>> prepare_dataset(lj_speech[0]) ```
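単一のサンプルで動作を確認できたら、🤗 Datasets の `map` を使ってデータセット全体に前処理を適用できます。以下は一例で、前処理後に不要となった元の列を削除しています:

```py
>>> lj_speech = lj_speech.map(prepare_dataset, remove_columns=lj_speech.column_names)
```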
transformers/docs/source/ja/preprocessing.md/0
{ "file_path": "transformers/docs/source/ja/preprocessing.md", "repo_id": "transformers", "token_count": 12720 }
276
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 큰 모델 인스턴스화 [[instantiating-a-big-model]] 매우 큰 사전훈련된 모델을 사용하려면, RAM 사용을 최소화해야 하는 과제가 있습니다. 일반적인 PyTorch 워크플로우는 다음과 같습니다: 1. 무작위 가중치로 모델을 생성합니다. 2. 사전훈련된 가중치를 불러옵니다. 3. 사전훈련된 가중치를 무작위 모델에 적용합니다. 1단계와 2단계 모두 모델의 전체 버전을 메모리에 적재해야 하며, 대부분 문제가 없지만 모델이 기가바이트급의 용량을 차지하기 시작하면 복사본 2개가 RAM을 초과하여 메모리 부족 이슈를 야기할 수 있습니다. 더 심각한 문제는 분산 학습을 위해 `torch.distributed`를 사용하는 경우, 프로세스마다 사전훈련된 모델을 로드하고 복사본을 2개씩 RAM에 저장한다는 것입니다. <Tip> 무작위로 생성된 모델은 "비어 있는" (즉 그때 메모리에 있던 것으로 이뤄진) 텐서로 초기화되며 메모리 공간을 차지합니다. 초기화된 모델/파라미터의 종류에 적합한 분포(예: 정규 분포)에 따른 무작위 초기화는 가능한 한 빠르게 하기 위해 초기화되지 않은 가중치에 대해 3단계 이후에만 수행됩니다! </Tip> 이 안내서에서는 Transformers가 이 문제를 해결하기 위해 제공하는 솔루션을 살펴봅니다. 주의할 점은 아직 활발히 개발 중인 분야이므로 여기서 설명하는 API가 앞으로 약간 변경될 수 있다는 것입니다. ## 샤딩된 체크포인트 [[sharded-checkpoints]] 4.18.0 버전 이후, 10GB 이상의 공간을 차지하는 모델 체크포인트는 자동으로 작은 조각들로 샤딩됩니다. `model.save_pretrained(save_dir)`를 실행할 때 하나의 단일 체크포인트를 가지게 될 대신, 여러 부분 체크포인트(각각의 크기는 10GB 미만)와 매개변수 이름을 해당 파일에 매핑하는 인덱스가 생성됩니다. `max_shard_size` 매개변수로 샤딩 전 최대 크기를 제어할 수 있으므로, 이 예제를 위해 샤드 크기가 작은 일반 크기의 모델을 사용하겠습니다: 전통적인 BERT 모델을 사용해 봅시다. ```py from transformers import AutoModel model = AutoModel.from_pretrained("google-bert/bert-base-cased") ``` [`~PreTrainedModel.save_pretrained`]을 사용하여 모델을 저장하면, 모델의 구성과 가중치가 들어있는 두 개의 파일이 있는 새 폴더가 생성됩니다: ```py >>> import os >>> import tempfile >>> with tempfile.TemporaryDirectory() as tmp_dir: ... model.save_pretrained(tmp_dir) ... print(sorted(os.listdir(tmp_dir))) ['config.json', 'pytorch_model.bin'] ``` 이제 최대 샤드 크기를 200MB로 사용해 봅시다: ```py >>> with tempfile.TemporaryDirectory() as tmp_dir: ... model.save_pretrained(tmp_dir, max_shard_size="200MB") ... print(sorted(os.listdir(tmp_dir))) ['config.json', 'pytorch_model-00001-of-00003.bin', 'pytorch_model-00002-of-00003.bin', 'pytorch_model-00003-of-00003.bin', 'pytorch_model.bin.index.json'] ``` 모델의 구성에 더해, 세 개의 다른 가중치 파일과 파라미터 이름과 해당 파일의 매핑이 포함된 `index.json` 파일을 볼 수 있습니다. 이러한 체크포인트는 [`~PreTrainedModel.from_pretrained`] 메서드를 사용하여 완전히 다시 로드할 수 있습니다: ```py >>> with tempfile.TemporaryDirectory() as tmp_dir: ... model.save_pretrained(tmp_dir, max_shard_size="200MB") ... new_model = AutoModel.from_pretrained(tmp_dir) ``` 큰 모델의 경우 이러한 방식으로 처리하는 주된 장점은 위에서 보여준 흐름의 2단계에서, 각 샤드가 이전 샤드 다음에 로드되므로 메모리 사용량이 모델 크기와 가장 큰 샤드의 크기를 초과하지 않는다는 점입니다. 이 인덱스 파일은 키가 체크포인트에 있는지, 그리고 해당 가중치가 어디에 저장되어 있는지를 결정하는 데 사용됩니다. 이 인덱스를 json과 같이 로드하고 딕셔너리를 얻을 수 있습니다: ```py >>> import json >>> with tempfile.TemporaryDirectory() as tmp_dir: ... model.save_pretrained(tmp_dir, max_shard_size="200MB") ... with open(os.path.join(tmp_dir, "pytorch_model.bin.index.json"), "r") as f: ... index = json.load(f) >>> print(index.keys()) dict_keys(['metadata', 'weight_map']) ``` 메타데이터는 현재 모델의 총 크기만 포함됩니다. 
앞으로 다른 정보를 추가할 계획입니다: ```py >>> index["metadata"] {'total_size': 433245184} ``` 가중치 맵은 이 인덱스의 주요 부분으로, 각 매개변수 이름(PyTorch 모델 `state_dict`에서 보통 찾을 수 있는)을 해당 파일에 매핑합니다: ```py >>> index["weight_map"] {'embeddings.LayerNorm.bias': 'pytorch_model-00001-of-00003.bin', 'embeddings.LayerNorm.weight': 'pytorch_model-00001-of-00003.bin', ... ``` 만약 [`~PreTrainedModel.from_pretrained`]를 사용하지 않고 모델 내에서 이러한 샤딩된 체크포인트를 직접 가져오려면 (전체 체크포인트를 위해 `model.load_state_dict()`를 수행하는 것처럼), [`~modeling_utils.load_sharded_checkpoint`]를 사용해야 합니다. ```py >>> from transformers.modeling_utils import load_sharded_checkpoint >>> with tempfile.TemporaryDirectory() as tmp_dir: ... model.save_pretrained(tmp_dir, max_shard_size="200MB") ... load_sharded_checkpoint(model, tmp_dir) ``` ## 저(低)메모리 로딩 [[low-memory-loading]] 샤딩된 체크포인트는 위에서 언급한 작업 흐름의 2단계에서 메모리 사용량을 줄이지만, 저(低)메모리 설정에서 모델을 사용하기 위해 우리의 Accelerate 라이브러리를 기반으로 한 도구를 활용하는 것이 좋습니다. 자세한 사항은 다음 가이드를 참조해주세요: [Accelerate로 대규모 모델 가져오기 (영문)](../en/main_classes/model#large-model-loading)
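참고로, Accelerate가 설치되어 있다고 가정하면 다음과 같이 저메모리 로딩을 사용할 수 있습니다(체크포인트 이름은 예시입니다):

```py
from transformers import AutoModelForCausalLM

# low_cpu_mem_usage=True 를 사용하면 체크포인트를 로드하는 동안
# 모델 전체의 복사본을 한 개만 메모리에 유지합니다.
# device_map="auto" 는 사용 가능한 GPU/CPU에 가중치를 자동으로 분배합니다.
model = AutoModelForCausalLM.from_pretrained(
    "bigscience/bloom-3b",
    low_cpu_mem_usage=True,
    device_map="auto",
)
```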
transformers/docs/source/ko/big_models.md/0
{ "file_path": "transformers/docs/source/ko/big_models.md", "repo_id": "transformers", "token_count": 4438 }
277
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Whisper [[whisper]] ## 개요 [[overview]] Whisper 모델은 Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever에 의해 [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf)에서 제안되었습니다. 논문의 초록은 다음과 같습니다: *우리는 인터넷에서 대량의 오디오를 글로 옮긴 것을 예측하도록 간단히 훈련된 음성 처리 시스템의 성능을 연구합니다. 68만 시간의 다국어 및 다중 작업 지도(multitask supervision)에 확장했을 때, 결과 모델은 표준 벤치마크에 잘 일반화되며, 미세 조정이 필요 없는 제로샷 전송 설정에서 이전의 완전히 지도된(fully-supervised) 결과와 경쟁할 수 있는 경우가 많습니다. 사람과 비교하면, 이 모델은 사람의 정확도와 견고성에 근접합니다. 우리는 강력한 음성 처리를 위한 추가 작업의 기반이 될 모델과 추론 코드를 공개합니다.* 팁: - 이 모델은 일반적으로 별도의 미세 조정 없이도 잘 작동합니다. - 아키텍처는 고전적인 인코더-디코더 아키텍처를 따르기 때문에, 추론을 위해 [`~generation.GenerationMixin.generate`] 함수를 사용합니다. - 현재 추론은 짧은 형식에만 구현되어 있으며, 오디오는 30초 미만의 세그먼트로 미리 분할되어야 합니다. 타임스탬프를 포함한 긴 형식에 대한 추론은 향후 릴리스에서 구현될 예정입니다. - [`WhisperProcessor`]를 사용하여 모델에 사용할 오디오를 준비하고, 예측된 ID를 텍스트로 디코딩할 수 있습니다. - 모델과 프로세서를 변환하려면 다음을 사용하는 것이 좋습니다: ```bash python src/transformers/models/whisper/convert_openai_to_hf.py --checkpoint_path "" --pytorch_dump_folder_path "Arthur/whisper-3" --convert_preprocessor True ``` 스크립트는 OpenAI 체크포인트에서 필요한 모든 매개변수를 자동으로 결정합니다. OpenAI 변환을 수행하려면 `tiktoken` 라이브러리를 설치해야 합니다. 라이브러리를 설치해야 OpenAI 토큰화기를 `tokenizers` 버전으로 변환할 수 있습니다. 이 모델은 [Arthur Zucker](https://huggingface.co/ArthurZ)에 의해 제공되었습니다. 이 모델의 Tensorflow 버전은 [amyeroberts](https://huggingface.co/amyeroberts)에 의해 제공되었습니다. 원본 코드는 [여기](https://github.com/openai/whisper)에서 찾을 수 있습니다. 
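다음은 `openai/whisper-base` 체크포인트와 30초 미만의 짧은 오디오 샘플을 가정한 최소한의 음성 인식 스케치입니다(데이터셋과 체크포인트는 예시입니다):

```python
import torch
from datasets import load_dataset
from transformers import WhisperProcessor, WhisperForConditionalGeneration

processor = WhisperProcessor.from_pretrained("openai/whisper-base")
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-base")

# 16kHz로 샘플링된 짧은 오디오 샘플 로드 (예시용 데이터셋)
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
sample = ds[0]["audio"]

input_features = processor(
    sample["array"], sampling_rate=sample["sampling_rate"], return_tensors="pt"
).input_features

with torch.no_grad():
    predicted_ids = model.generate(input_features)

# 예측된 ID를 텍스트로 디코딩
transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)
print(transcription)
```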
## WhisperConfig [[whisperconfig]] [[autodoc]] WhisperConfig ## WhisperTokenizer [[whispertokenizer]] [[autodoc]] WhisperTokenizer - set_prefix_tokens - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## WhisperTokenizerFast [[whispertokenizerfast]] [[autodoc]] WhisperTokenizerFast - set_prefix_tokens - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## WhisperFeatureExtractor [[whisperfeatureextractor]] [[autodoc]] WhisperFeatureExtractor - __call__ ## WhisperProcessor [[whisperprocessor]] [[autodoc]] WhisperProcessor - __call__ - from_pretrained - save_pretrained - batch_decode - decode ## WhisperModel [[whispermodel]] [[autodoc]] WhisperModel - forward - _mask_input_features ## WhisperForConditionalGeneration [[whisperforconditionalgeneration]] [[autodoc]] WhisperForConditionalGeneration - forward ## WhisperForAudioClassification [[whisperforaudioclassification]] [[autodoc]] WhisperForAudioClassification - forward ## TFWhisperModel [[tfwhispermodel]] [[autodoc]] TFWhisperModel - call ## TFWhisperForConditionalGeneration [[tfwhisperforconditionalgeneration]] [[autodoc]] TFWhisperForConditionalGeneration - call ## FlaxWhisperModel [[flaxwhispermodel]] [[autodoc]] FlaxWhisperModel - __call__ ## FlaxWhisperForConditionalGeneration [[flaxwhisperforconditionalgeneration]] [[autodoc]] FlaxWhisperForConditionalGeneration - __call__ ## FlaxWhisperForAudioClassification [[flaxwhisperforaudioclassification]] [[autodoc]] FlaxWhisperForAudioClassification - __call__
transformers/docs/source/ko/model_doc/whisper.md/0
{ "file_path": "transformers/docs/source/ko/model_doc/whisper.md", "repo_id": "transformers", "token_count": 2696 }
278
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # 고정 길이 모델의 펄플렉서티(Perplexity)[[perplexity-of-fixedlength-models]] [[open-in-colab]] 펄플렉서티(Perplexity, PPL)는 가장 일반적인 언어 모델 평가지표 중 하나입니다. 자세히 알아보기 전에 이 평가지표는 고전적인 언어 모델(자기회귀 또는 인과적 언어 모델이라고도 함)에만 적용되며 BERT와 같은 마스킹된 언어 모델에는 잘 적용하지 않습니다 (BERT는 [summary of the models](../en/model_summary) 문서를 참고하세요). 펄플렉서티는 시퀀스의 음의 로그 우도(negative log-likelihood, NLL) 값의 평균에 지수(exponentiate)를 취한 값으로 정의됩니다. 토큰화된 시퀀스 \\(X = (x_0, x_1, \dots, x_t)\\) 가 있을 때, \\(X\\) 의 펄플렉서티는 아래 수식과 같이 구할 수 있습니다. $$\text{PPL}(X) = \exp \left\{ {-\frac{1}{t}\sum_i^t \log p_\theta (x_i|x_{<i}) } \right\}$$ \\(\log p_\theta (x_i|x_{<i})\\) 는 모델에 i번째 이전까지 토큰이 주어졌을 때 i번째 토큰의 로그 우도값입니다. 직관적으로 말뭉치에서 지정된 토큰 집합을 균일하게 예측하는 모델의 능력에 대한 평가로 생각할 수 있습니다. 중요한 점은 토큰화 과정이 모델의 펄플렉서티에 직접적인 영향을 미치므로 서로 다른 모델을 비교할 때 항상 이를 고려해야 합니다. 이는 데이터와 모델 예측 간의 cross-entropy 값에 지수를 취한 것과 동일합니다. 펄플렉서티와 문자당 비트 수(BPC) 및 데이터 압축과의 관계에 대해 더 직관적인 이해를 원하신다면 다음 글 [fantastic blog post on The Gradient](https://thegradient.pub/understanding-evaluation-metrics-for-language-models/)을 확인하세요. ## 고정 길이 모델의 펄플렉서티(PPL) 계산하기[[calculating-ppl-with-fixedlength-models]] 모델의 컨텍스트 크기가 정해져있지 않다면, 아래와 같이 시퀀스를 자동 회귀적으로 분해하고 각 단계에서 선행 하는 전체 시퀀스를 조건부 확률에 넣어 모델의 펄플렉서티를 계산할 것입니다. <img width="600" alt="Full decomposition of a sequence with unlimited context length" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/ppl_full.gif"/> 그러나 모델의 근사치를 구할 때는 일반적으로 모델이 처리할 수 있는 토큰 수에 제한이 있습니다. 예를 들어, 가장 큰 버전의 [GPT-2](model_doc/gpt2)는 토큰의 길이가 1024로 고정되어 있습니다. 따라서 \\(t\\) 가 1024보다 큰 경우에 \\(p_\theta(x_t|x_{<t})\\) 을 계산할 수 없습니다. 대신 시퀀스는 일반적으로 모델의 최대 입력 크기와 동일한 길이는 가지는 부분 시퀀스로 쪼갭니다. 만약 모델의 최대 입력 길이가 \\(k\\) 라면, 토큰 \\(x_t\\) 의 우도 값을 계산할 때 이전 토큰을 모두 사용하지 않고, \\(k-1\\) 토큰까지 사용해 대략적인 우도 값을 추정합니다. 모델의 시퀀스에 대한 펄플렉서티를 계산할 때, 수월하지만 차선책은 시퀀스를 청크로 쪼개고 분해된 각 부분의 로그 우도 값을 독립적으로 합산하는 것입니다. <img width="600" alt="Suboptimal PPL not taking advantage of full available context" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/ppl_chunked.gif"/> 이 방법은 각 부분의 펄플렉서티를 한 번의 포워드 패스로 계산할 수 있어 빠르지만 일반적으로 더 높은(더 나쁜) PPL을 산출합니다. 왜냐하면 대부분의 예측 단계에서 모델의 컨텍스트가 적기 때문입니다. 대신, 고정 길이 모델의 PPL은 슬라이딩 윈도우 전략으로 평가해야 합니다. 이 전략에는 컨텍스트 윈도우을 반복적으로 슬라이딩해 모델이 각 예측을 수행할 때 더 많은 컨텍스트를 갖도록 하는 작업이 포함됩니다. <img width="600" alt="Sliding window PPL taking advantage of all available context" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/ppl_sliding.gif"/> 이는 시퀀스 확률의 실제 분해에 더 가까운 근사치이며 일반적으로 더 유리한 점수를 산출합니다. 단점은 말뭉치의 각 토큰에 대해 별도의 포워드 패스가 필요하다는 것입니다. 현실적으로 좋은 절충안은 한 번에 한 토큰씩 슬라이딩하는 것이 아니라 더 큰 간격으로 컨텍스트를 이동하는 스트라이드가 적용된 슬라이딩 윈도우을 사용하는 것입니다. 이렇게 하면 계산을 훨씬 더 빠르게 진행하면서도 모델에 각 단계에서 예측을 수행할 수 있는 긴 컨텍스트를 제공할 수 있습니다. ## 예제: 🤗 Transformers에서 GPT-2로 펄플렉서티(perplexity) 계산하기[[example-calculating-perplexity-with-gpt2-in-transformers]] 이제 GPT-2로 위의 과정을 시연해 보겠습니다. 
```python from transformers import GPT2LMHeadModel, GPT2TokenizerFast device = "cuda" model_id = "openai-community/gpt2-large" model = GPT2LMHeadModel.from_pretrained(model_id).to(device) tokenizer = GPT2TokenizerFast.from_pretrained(model_id) ``` WikiText-2 데이터 세트를 가져오고 몇 가지 슬라이딩 윈도우 전략을 사용해 펄플렉서티를 계산해보겠습니다. 이 데이터 세트는 크기가 작고 포워드 패스 한 번만 수행하기 때문에 전체 데이터 세트를 메모리에 가져오고 인코딩할 수 있습니다. ```python from datasets import load_dataset test = load_dataset("wikitext", "wikitext-2-raw-v1", split="test") encodings = tokenizer("\n\n".join(test["text"]), return_tensors="pt") ``` 🤗 Transformers를 사용하면 모델의 `labels`로 `input_ids`를 전달해 각 토큰에 대한 평균 음의 우도 값을 손실로 반환할 수 있습니다. 하지만 슬라이딩 윈도우 방식을 사용하면 각 반복마다 모델에 전달하는 토큰이 겹칩니다. 컨텍스트로 처리하는 토큰에 대한 로그 우도 값이 손실에 포함되는 것을 원하지 않기 때문에 이러한 토큰의 `input_ids`를 `-100`으로 설정하여 무시할 수 있습니다. 다음은 스트라이드(stride)를 `512`로 사용한 예시입니다. 즉, 모델이 한 토큰의 조건부 우도 값을 계산할 때 컨텍스트에 최소한 512개의 토큰이 포함되어있다는 의미입니다 (해당 토큰 앞에 512개의 토큰이 있는 경우). ```python import torch from tqdm import tqdm max_length = model.config.n_positions stride = 512 seq_len = encodings.input_ids.size(1) nlls = [] prev_end_loc = 0 for begin_loc in tqdm(range(0, seq_len, stride)): end_loc = min(begin_loc + max_length, seq_len) trg_len = end_loc - prev_end_loc # 마지막 루프의 스트라이드 값과 다를 수 있음 input_ids = encodings.input_ids[:, begin_loc:end_loc].to(device) target_ids = input_ids.clone() target_ids[:, :-trg_len] = -100 with torch.no_grad(): outputs = model(input_ids, labels=target_ids) # 손실은 모든 유효한 레이블에 대한 평균값을 구하는 교차 엔트로피(cross entropy)로 계산됩니다. # 나이브 베이지안 모델은 내부적으로 레이블을 왼쪽으로 1개씩 밀기 때문에, (타켓 - 1)개 만큼의 레이블에 대해 손실을 계산합니다. neg_log_likelihood = outputs.loss nlls.append(neg_log_likelihood) prev_end_loc = end_loc if end_loc == seq_len: break ppl = torch.exp(torch.stack(nlls).mean()) ``` 스트라이드를 최대 입력 길이와 동일하게 설정하면 위에서 설명한 차선책인 비슬라이딩 윈도우 전략과 동일합니다. 일반적으로 스트라이드가 작을수록 모델이 각 예측을 할 때 더 많은 컨텍스트를 볼 수 있게 되어 펄플렉서티 값이 좋아집니다. 위의 계산을 토큰이 겹치지 않도록 `stride = 1024`로 설정하면 PPL은 `19.44`로 GPT-2 논문에서 보고된 `19.93`과 거의 동일합니다. `stride = 512`로 슬라이딩 윈도우 전략을 사용하면 PPL은 `16.45`로 떨어집니다. 이는 더 좋은 점수일 뿐만 아니라 시퀀스 확률의 실제 자동 회귀 분해에 더 가까운 방식으로 계산됩니다.
transformers/docs/source/ko/perplexity.md/0
{ "file_path": "transformers/docs/source/ko/perplexity.md", "repo_id": "transformers", "token_count": 6268 }
279
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 테스트[[testing]] 먼저 🤗 Transformers 모델이 어떻게 테스트되는지 살펴보고, 새로운 테스트를 작성 및 기존 테스트를 개선하는 방법을 알아봅시다. 이 저장소에는 2개의 테스트 스위트가 있습니다: 1. `tests` - 일반 API에 대한 테스트 2. `examples` - API의 일부가 아닌 다양한 응용 프로그램에 대한 테스트 ## Transformers 테스트 방법[[how-transformers-are-tested]] 1. PR이 제출되면 9개의 CircleCi 작업으로 테스트가 진행됩니다. 해당 PR에 대해 새로운 커밋이 생성될 때마다 테스트는 다시 진행됩니다. 이 작업들은 이 [config 파일](https://github.com/huggingface/transformers/tree/main/.circleci/config.yml)에 정의되어 있으므로 필요하다면 사용자의 로컬 환경에서 동일하게 재현해 볼 수 있습니다. 이 CI 작업은 `@slow` 테스트를 실행하지 않습니다. 2. [github actions](https://github.com/huggingface/transformers/actions)에 의해 실행되는 작업은 3개입니다: - [torch hub integration](https://github.com/huggingface/transformers/tree/main/.github/workflows/github-torch-hub.yml): torch hub integration이 작동하는지 확인합니다. - [self-hosted (push)](https://github.com/huggingface/transformers/tree/main/.github/workflows/self-push.yml): `main` 브랜치에서 커밋이 업데이트된 경우에만 GPU를 이용한 빠른 테스트를 실행합니다. 이는 `src`, `tests`, `.github` 폴더 중 하나에 코드가 업데이트된 경우에만 실행됩니다. (model card, notebook, 기타 등등을 추가한 경우 실행되지 않도록 하기 위해서입니다) - [self-hosted runner](https://github.com/huggingface/transformers/tree/main/.github/workflows/self-scheduled.yml): `tests` 및 `examples`에서 GPU를 이용한 일반 테스트, 느린 테스트를 실행합니다. ```bash RUN_SLOW=1 pytest tests/ RUN_SLOW=1 pytest examples/ ``` 결과는 [여기](https://github.com/huggingface/transformers/actions)에서 확인할 수 있습니다. ## 테스트 실행[[running-tests]] ### 실행할 테스트 선택[[choosing-which-tests-to-run]] 이 문서는 테스트를 실행하는 다양한 방법에 대해 자세히 설명합니다. 모든 내용을 읽은 후에도, 더 자세한 내용이 필요하다면 [여기](https://docs.pytest.org/en/latest/usage.html)에서 확인할 수 있습니다. 다음은 가장 유용한 테스트 실행 방법 몇 가지입니다. 모두 실행: ```console pytest ``` 또는: ```bash make test ``` 후자는 다음과 같이 정의됩니다: ```bash python -m pytest -n auto --dist=loadfile -s -v ./tests/ ``` 위의 명령어는 pytest에게 아래의 내용을 전달합니다: - 사용 가능한 CPU 코어 수만큼 테스트 프로세스를 실행합니다. (RAM이 충분하지 않다면, 테스트 프로세스 수가 너무 많을 수 있습니다!) - 동일한 파일의 모든 테스트는 동일한 테스트 프로세스에서 실행되어야 합니다. - 출력을 캡처하지 않습니다. - 자세한 모드로 실행합니다. ### 모든 테스트 목록 가져오기[[getting-the-list-of-all-tests]] 테스트 스위트의 모든 테스트: ```bash pytest --collect-only -q ``` 지정된 테스트 파일의 모든 테스트: ```bash pytest tests/test_optimization.py --collect-only -q ``` ### 특정 테스트 모듈 실행[[run-a-specific-test-module]] 개별 테스트 모듈 실행하기: ```bash pytest tests/utils/test_logging.py ``` ### 특정 테스트 실행[[run-specific-tests]] 대부분의 테스트 내부에서는 unittest가 사용됩니다. 따라서 특정 하위 테스트를 실행하려면 해당 테스트를 포함하는 unittest 클래스의 이름을 알아야 합니다. 예를 들어 다음과 같을 수 있습니다: ```bash pytest tests/test_optimization.py::OptimizationTest::test_adam_w ``` 위의 명령어의 의미는 다음과 같습니다: - `tests/test_optimization.py` - 테스트가 있는 파일 - `OptimizationTest` - 클래스의 이름 - `test_adam_w` - 특정 테스트 함수의 이름 파일에 여러 클래스가 포함된 경우, 특정 클래스의 테스트만 실행할 수도 있습니다. 예를 들어 다음과 같습니다: ```bash pytest tests/test_optimization.py::OptimizationTest ``` 이 명령어는 해당 클래스 내부의 모든 테스트를 실행합니다. 앞에서 언급한 것처럼 `OptimizationTest` 클래스에 포함된 테스트를 확인할 수 있습니다. 
```bash pytest tests/test_optimization.py::OptimizationTest --collect-only -q ``` 키워드 표현식을 사용하여 테스트를 실행할 수도 있습니다. `adam`이라는 이름을 포함하는 테스트만 실행하려면 다음과 같습니다: ```bash pytest -k adam tests/test_optimization.py ``` 논리 연산자 `and`와 `or`를 사용하여 모든 키워드가 일치해야 하는지 또는 어느 하나가 일치해야 하는지를 나타낼 수 있습니다. `not`은 부정할 때 사용할 수 있습니다. `adam`이라는 이름을 포함하지 않는 모든 테스트를 실행하려면 다음과 같습니다: ```bash pytest -k "not adam" tests/test_optimization.py ``` 두 가지 패턴을 하나로 결합할 수도 있습니다: ```bash pytest -k "ada and not adam" tests/test_optimization.py ``` 예를 들어 `test_adafactor`와 `test_adam_w`를 모두 실행하려면 다음을 사용할 수 있습니다: ```bash pytest -k "test_adam_w or test_adam_w" tests/test_optimization.py ``` 여기서 `or`를 사용하는 것에 유의하세요. 두 키워드 중 하나가 일치하도록 하기 위한 목적으로 사용하기 때문입니다. 두 패턴이 모두 포함되어야 하는 테스트만 실행하려면, `and`를 사용해야 합니다: ```bash pytest -k "test and ada" tests/test_optimization.py ``` ### `accelerate` 테스트 실행[[run-`accelerate`-tests]] 모델에서 `accelerate` 테스트를 실행해야 할 때가 있습니다. 이를 위해서는 명령어에 `-m accelerate_tests`를 추가하면 됩니다. 예를 들어, `OPT`에서 이러한 테스트를 실행하려면 다음과 같습니다: ```bash RUN_SLOW=1 pytest -m accelerate_tests tests/models/opt/test_modeling_opt.py ``` ### 문서 테스트 실행[[run-documentation-tests]] 예시 문서가 올바른지 테스트하려면 `doctests`가 통과하는지 확인해야 합니다. 예를 들어, [`WhisperModel.forward`'s docstring](https://github.com/huggingface/transformers/blob/main/src/transformers/models/whisper/modeling_whisper.py#L1017-L1035)를 사용해 봅시다: ```python r""" Returns: Example: ```python >>> import torch >>> from transformers import WhisperModel, WhisperFeatureExtractor >>> from datasets import load_dataset >>> model = WhisperModel.from_pretrained("openai/whisper-base") >>> feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-base") >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> inputs = feature_extractor(ds[0]["audio"]["array"], return_tensors="pt") >>> input_features = inputs.input_features >>> decoder_input_ids = torch.tensor([[1, 1]]) * model.config.decoder_start_token_id >>> last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state >>> list(last_hidden_state.shape) [1, 2, 512] ```""" ``` 원하는 파일의 모든 docstring 예제를 자동으로 테스트하려면 다음 명령을 실행하면 됩니다: ```bash pytest --doctest-modules <path_to_file_or_dir> ``` 파일의 확장자가 markdown인 경우 `--doctest-glob="*.md"` 인수를 추가해야 합니다. ### 수정된 테스트만 실행[[run-only-modified-tests]] 수정된 파일 또는 현재 브랜치 (Git 기준)와 관련된 테스트를 실행하려면 [pytest-picked](https://github.com/anapaulagomes/pytest-picked)을 사용할 수 있습니다. 이는 변경한 내용이 테스트에 영향을 주지 않았는지 빠르게 확인할 수 있는 좋은 방법입니다. ```bash pip install pytest-picked ``` ```bash pytest --picked ``` 수정되었지만, 아직 커밋되지 않은 모든 파일 및 폴더에서 테스트가 실행됩니다. ### 소스 수정 시 실패한 테스트 자동 재실행[[automatically-rerun-failed-tests-on-source-modification]] [pytest-xdist](https://github.com/pytest-dev/pytest-xdist)는 모든 실패한 테스트를 감지하고, 파일을 수정한 후에 파일을 계속 재실행하여 테스트가 성공할 때까지 기다리는 매우 유용한 기능을 제공합니다. 따라서 수정한 내용을 확인한 후 pytest를 다시 시작할 필요가 없습니다. 모든 테스트가 통과될 때까지 이 과정을 반복한 후 다시 전체 실행이 이루어집니다. ```bash pip install pytest-xdist ``` 재귀적 모드의 사용: `pytest -f` 또는 `pytest --looponfail` 파일의 변경 사항은 `looponfailroots` 루트 디렉터리와 해당 내용을 (재귀적으로) 확인하여 감지됩니다. 이 값의 기본값이 작동하지 않는 경우, `setup.cfg`의 설정 옵션을 변경하여 프로젝트에서 변경할 수 있습니다: ```ini [tool:pytest] looponfailroots = transformers tests ``` 또는 `pytest.ini`/`tox.ini`` 파일: ```ini [pytest] looponfailroots = transformers tests ``` 이렇게 하면 ini-file의 디렉터리를 기준으로 상대적으로 지정된 각 디렉터리에서 파일 변경 사항만 찾게 됩니다. 이 기능을 대체할 수 있는 구현 방법인 [pytest-watch](https://github.com/joeyespo/pytest-watch)도 있습니다. ### 특정 테스트 모듈 건너뛰기[[skip-a-test-module]] 모든 테스트 모듈을 실행하되 특정 모듈을 제외하려면, 실행할 테스트 목록을 명시적으로 지정할 수 있습니다. 
예를 들어, `test_modeling_*.py` 테스트를 제외한 모든 테스트를 실행하려면 다음을 사용할 수 있습니다: ```bash pytest *ls -1 tests/*py | grep -v test_modeling* ``` ### 상태 초기화[[clearing state]] CI 빌드 및 (속도에 대한) 격리가 중요한 경우, 캐시를 지워야 합니다: ```bash pytest --cache-clear tests ``` ### 테스트를 병렬로 실행[[running-tests-in-parallel]] 이전에 언급한 것처럼 `make test`는 테스트를 병렬로 실행하기 위해 `pytest-xdist` 플러그인(`-n X` 인수, 예를 들어 `-n 2`를 사용하여 2개의 병렬 작업 실행)을 통해 실행됩니다. `pytest-xdist`의 `--dist=` 옵션을 사용하여 테스트를 어떻게 그룹화할지 제어할 수 있습니다. `--dist=loadfile`은 하나의 파일에 있는 테스트를 동일한 프로세스로 그룹화합니다. 실행된 테스트의 순서가 다르고 예측할 수 없기 때문에, `pytest-xdist`로 테스트 스위트를 실행하면 실패가 발생할 수 있습니다 (검출되지 않은 결합된 테스트가 있는 경우). 이 경우 [pytest-replay](https://github.com/ESSS/pytest-replay)를 사용하면 동일한 순서로 테스트를 다시 실행해서 실패하는 시퀀스를 최소화하는 데에 도움이 됩니다. ### 테스트 순서와 반복[[test-order-and-repetition]] 잠재적인 종속성 및 상태 관련 버그(tear down)를 감지하기 위해 테스트를 여러 번, 연속으로, 무작위로 또는 세트로 반복하는 것이 좋습니다. 그리고 직접적인 여러 번의 반복은 DL의 무작위성에 의해 발견되는 일부 문제를 감지하는 데에도 유용합니다. #### 테스트를 반복[[repeat-tests]] - [pytest-flakefinder](https://github.com/dropbox/pytest-flakefinder): ```bash pip install pytest-flakefinder ``` 모든 테스트를 여러 번 실행합니다(기본값은 50번): ```bash pytest --flake-finder --flake-runs=5 tests/test_failing_test.py ``` <Tip> 이 플러그인은 `pytest-xdist`의 `-n` 플래그와 함께 작동하지 않습니다. </Tip> <Tip> `pytest-repeat`라는 또 다른 플러그인도 있지만 `unittest`와 함께 작동하지 않습니다. </Tip> #### 테스트를 임의의 순서로 실행[[run-tests-in-a-random-order]] ```bash pip install pytest-random-order ``` 중요: `pytest-random-order`가 설치되면 테스트가 자동으로 임의의 순서로 섞입니다. 구성 변경이나 커맨드 라인 옵션이 필요하지 않습니다. 앞서 설명한 것처럼 이를 통해 한 테스트의 상태가 다른 테스트의 상태에 영향을 미치는 결합된 테스트를 감지할 수 있습니다. `pytest-random-order`가 설치되면 해당 세션에서 사용된 랜덤 시드가 출력되며 예를 들어 다음과 같습니다: ```bash pytest tests [...] Using --random-order-bucket=module Using --random-order-seed=573663 ``` 따라서 특정 시퀀스가 실패하는 경우에는 정확한 시드를 추가하여 재현할 수 있습니다. 예를 들어 다음과 같습니다: ```bash pytest --random-order-seed=573663 [...] Using --random-order-bucket=module Using --random-order-seed=573663 ``` 정확히 동일한 테스트 목록(또는 목록이 없음)을 사용하는 경우에만 정확한 순서를 재현합니다. 목록을 수동으로 좁히기 시작하면 더 이상 시드에 의존할 수 없고 실패했던 정확한 순서로 수동으로 목록을 나열해야합니다. 그리고 `--random-order-bucket=none`을 사용하여 pytest에게 순서를 임의로 설정하지 않도록 알려야 합니다. 예를 들어 다음과 같습니다: ```bash pytest --random-order-bucket=none tests/test_a.py tests/test_c.py tests/test_b.py ``` 모든 테스트에 대해 섞기를 비활성화하려면 다음과 같습니다: ```bash pytest --random-order-bucket=none ``` 기본적으로 `--random-order-bucket=module`이 내재되어 있으므로, 모듈 수준에서 파일을 섞습니다. 또한 `class`, `package`, `global` 및 `none` 수준에서도 섞을 수 있습니다. 자세한 내용은 해당 [문서](https://github.com/jbasko/pytest-random-order)를 참조하세요. 또 다른 무작위화의 대안은 [`pytest-randomly`](https://github.com/pytest-dev/pytest-randomly)입니다. 이 모듈은 매우 유사한 기능/인터페이스를 가지고 있지만, `pytest-random-order`에 있는 버킷 모드를 사용할 수는 없습니다. 설치 후에는 자동으로 적용되는 문제도 동일하게 가집니다. ### 외관과 느낌을 변경[[look-and-feel-variations] #### pytest-sugar 사용[[pytest-sugar]] [pytest-sugar](https://github.com/Frozenball/pytest-sugar)는 테스트가 보여지는 형태를 개선하고, 진행 상황 바를 추가하며, 실패한 테스트와 검증을 즉시 표시하는 플러그인입니다. 설치하면 자동으로 활성화됩니다. ```bash pip install pytest-sugar ``` pytest-sugar 없이 테스트를 실행하려면 다음과 같습니다: ```bash pytest -p no:sugar ``` 또는 제거하세요. #### 각 하위 테스트 이름과 진행 상황 보고[[report-each-sub-test-name-and-its-progress]] `pytest`를 통해 단일 또는 그룹의 테스트를 실행하는 경우(`pip install pytest-pspec` 이후): ```bash pytest --pspec tests/test_optimization.py ``` #### 실패한 테스트 즉시 표시[[instantly-shows-failed-tests]] [pytest-instafail](https://github.com/pytest-dev/pytest-instafail)은 테스트 세션의 끝까지 기다리지 않고 실패 및 오류를 즉시 표시합니다. 
```bash pip install pytest-instafail ``` ```bash pytest --instafail ``` ### GPU 사용 여부[[to-GPU-or-not-to-GPU]] GPU가 활성화된 환경에서, CPU 전용 모드로 테스트하려면 `CUDA_VISIBLE_DEVICES=""`를 추가합니다: ```bash CUDA_VISIBLE_DEVICES="" pytest tests/utils/test_logging.py ``` 또는 다중 GPU가 있는 경우 `pytest`에서 사용할 GPU를 지정할 수도 있습니다. 예를 들어, GPU `0` 및 `1`이 있는 경우 다음을 실행할 수 있습니다: ```bash CUDA_VISIBLE_DEVICES="1" pytest tests/utils/test_logging.py ``` 이렇게 하면 다른 GPU에서 다른 작업을 실행하려는 경우 유용합니다. 일부 테스트는 반드시 CPU 전용으로 실행해야 하며, 일부는 CPU 또는 GPU 또는 TPU에서 실행해야 하고, 일부는 여러 GPU에서 실행해야 합니다. 다음 스킵 데코레이터는 테스트의 요구 사항을 CPU/GPU/TPU별로 설정하는 데 사용됩니다: - `require_torch` - 이 테스트는 torch에서만 실행됩니다. - `require_torch_gpu` - `require_torch`에 추가로 적어도 1개의 GPU가 필요합니다. - `require_torch_multi_gpu` - `require_torch`에 추가로 적어도 2개의 GPU가 필요합니다. - `require_torch_non_multi_gpu` - `require_torch`에 추가로 0개 또는 1개의 GPU가 필요합니다. - `require_torch_up_to_2_gpus` - `require_torch`에 추가로 0개, 1개 또는 2개의 GPU가 필요합니다. - `require_torch_xla` - `require_torch`에 추가로 적어도 1개의 TPU가 필요합니다. GPU 요구 사항을 표로 정리하면 아래와 같습니디ㅏ: | n gpus | decorator | |--------+--------------------------------| | `>= 0` | `@require_torch` | | `>= 1` | `@require_torch_gpu` | | `>= 2` | `@require_torch_multi_gpu` | | `< 2` | `@require_torch_non_multi_gpu` | | `< 3` | `@require_torch_up_to_2_gpus` | 예를 들어, 2개 이상의 GPU가 있고 pytorch가 설치되어 있을 때에만 실행되어야 하는 테스트는 다음과 같습니다: ```python no-style @require_torch_multi_gpu def test_example_with_multi_gpu(): ``` `tensorflow`가 필요한 경우 `require_tf` 데코레이터를 사용합니다. 예를 들어 다음과 같습니다: ```python no-style @require_tf def test_tf_thing_with_tensorflow(): ``` 이러한 데코레이터는 중첩될 수 있습니다. 예를 들어, 느린 테스트로 진행되고 pytorch에서 적어도 하나의 GPU가 필요한 경우 다음과 같이 설정할 수 있습니다: ```python no-style @require_torch_gpu @slow def test_example_slow_on_gpu(): ``` `@parametrized`와 같은 일부 데코레이터는 테스트 이름을 다시 작성하기 때문에 `@require_*` 스킵 데코레이터는 올바르게 작동하려면 항상 맨 마지막에 나열되어야 합니다. 다음은 올바른 사용 예입니다: ```python no-style @parameterized.expand(...) @require_torch_multi_gpu def test_integration_foo(): ``` `@pytest.mark.parametrize`에는 이러한 순서 문제는 없으므로 처음 혹은 마지막에 위치시킬 수 있고 이러한 경우에도 잘 작동할 것입니다. 하지만 unittest가 아닌 경우에만 작동합니다. 테스트 내부에서 다음을 사용할 수 있습니다: - 사용 가능한 GPU 수: ```python from transformers.testing_utils import get_gpu_count n_gpu = get_gpu_count() #torch와 tf와 함께 작동 ``` ### 분산 훈련[[distributed-training]] `pytest`는 분산 훈련을 직접적으로 다루지 못합니다. 이를 시도하면 하위 프로세스가 올바른 작업을 수행하지 않고 `pytest`라고 생각하기에 테스트 스위트를 반복해서 실행하게 됩니다. 그러나 일반 프로세스를 생성한 다음 여러 워커를 생성하고 IO 파이프를 관리하도록 하면 동작합니다. 다음은 사용 가능한 테스트입니다: - [test_trainer_distributed.py](https://github.com/huggingface/transformers/tree/main/tests/trainer/test_trainer_distributed.py) - [test_deepspeed.py](https://github.com/huggingface/transformers/tree/main/tests/deepspeed/test_deepspeed.py) 실행 지점으로 바로 이동하려면, 해당 테스트에서 `execute_subprocess_async` 호출을 검색하세요. 이러한 테스트를 실행하려면 적어도 2개의 GPU가 필요합니다. ```bash CUDA_VISIBLE_DEVICES=0,1 RUN_SLOW=1 pytest -sv tests/test_trainer_distributed.py ``` ### 출력 캡처[[output-capture]] 테스트 실행 중 `stdout` 및 `stderr`로 전송된 모든 출력이 캡처됩니다. 테스트나 설정 메소드가 실패하면 캡처된 출력은 일반적으로 실패 추적 정보와 함께 표시됩니다. 
출력 캡처를 비활성화하고 `stdout` 및 `stderr`를 정상적으로 받으려면 `-s` 또는 `--capture=no`를 사용하세요: ```bash pytest -s tests/utils/test_logging.py ``` 테스트 결과를 JUnit 형식의 출력으로 보내려면 다음을 사용하세요: ```bash py.test tests --junitxml=result.xml ``` ### 색상 조절[[color-control]] 색상이 없게 하려면 다음과 같이 설정하세요(예를 들어 흰색 배경에 노란색 글씨는 가독성이 좋지 않습니다): ```bash pytest --color=no tests/utils/test_logging.py ``` ### online pastebin service에 테스트 보고서 전송[[sending test report to online pastebin service]] 각 테스트 실패에 대한 URL을 만듭니다: ```bash pytest --pastebin=failed tests/utils/test_logging.py ``` 이렇게 하면 각 실패에 대한 URL을 제공하는 remote Paste service에 테스트 실행 정보를 제출합니다. 일반적인 테스트를 선택할 수도 있고 혹은 특정 실패만 보내려면 `-x`와 같이 추가할 수도 있습니다. 전체 테스트 세션 로그에 대한 URL을 생성합니다: ```bash pytest --pastebin=all tests/utils/test_logging.py ``` ## 테스트 작성[[writing-tests]] 🤗 transformers 테스트는 대부분 `unittest`를 기반으로 하지만, `pytest`에서 실행되므로 대부분의 경우 두 시스템의 기능을 사용할 수 있습니다. 지원되는 기능에 대해 [여기](https://docs.pytest.org/en/stable/unittest.html)에서 확인할 수 있지만, 기억해야 할 중요한 점은 대부분의 `pytest` fixture가 작동하지 않는다는 것입니다. 파라미터화도 작동하지 않지만, 우리는 비슷한 방식으로 작동하는 `parameterized` 모듈을 사용합니다. ### 매개변수화[[parametrization]] 동일한 테스트를 다른 인수로 여러 번 실행해야 하는 경우가 종종 있습니다. 테스트 내에서 이 작업을 수행할 수 있지만, 그렇게 하면 하나의 인수 세트에 대해 테스트를 실행할 수 없습니다. ```python # test_this1.py import unittest from parameterized import parameterized class TestMathUnitTest(unittest.TestCase): @parameterized.expand( [ ("negative", -1.5, -2.0), ("integer", 1, 1.0), ("large fraction", 1.6, 1), ] ) def test_floor(self, name, input, expected): assert_equal(math.floor(input), expected) ``` 이제 기본적으로 이 테스트는 `test_floor`의 마지막 3개 인수가 매개변수 목록의 해당 인수에 할당되는 것으로 3번 실행될 것입니다. 그리고 `negative` 및 `integer` 매개변수 집합만 실행하려면 다음과 같이 실행할 수 있습니다: ```bash pytest -k "negative and integer" tests/test_mytest.py ``` 또는 `negative` 하위 테스트를 제외한 모든 서브 테스트를 다음과 같이 실행할 수 있습니다: ```bash pytest -k "not negative" tests/test_mytest.py ``` 앞에서 언급한 `-k` 필터를 사용하는 것 외에도, 각 서브 테스트의 정확한 이름을 확인한 후에 일부 혹은 전체 서브 테스트를 실행할 수 있습니다. ```bash pytest test_this1.py --collect-only -q ``` 그리고 다음의 내용을 확인할 수 있을 것입니다: ```bash test_this1.py::TestMathUnitTest::test_floor_0_negative test_this1.py::TestMathUnitTest::test_floor_1_integer test_this1.py::TestMathUnitTest::test_floor_2_large_fraction ``` 2개의 특정한 서브 테스트만 실행할 수도 있습니다: ```bash pytest test_this1.py::TestMathUnitTest::test_floor_0_negative test_this1.py::TestMathUnitTest::test_floor_1_integer ``` `transformers`의 개발자 종속성에 이미 있는 [parameterized](https://pypi.org/project/parameterized/) 모듈은 `unittests`와 `pytest` 테스트 모두에서 작동합니다. 그러나 테스트가 `unittest`가 아닌 경우 `pytest.mark.parametrize`를 사용할 수 있습니다(이미 있는 일부 테스트에서 사용되는 경우도 있습니다. 주로 `examples` 하위에 있습니다). 다음은 `pytest`의 `parametrize` 마커를 사용한 동일한 예입니다: ```python # test_this2.py import pytest @pytest.mark.parametrize( "name, input, expected", [ ("negative", -1.5, -2.0), ("integer", 1, 1.0), ("large fraction", 1.6, 1), ], ) def test_floor(name, input, expected): assert_equal(math.floor(input), expected) ``` `parameterized`와 마찬가지로 `pytest.mark.parametrize`를 사용하면 `-k` 필터가 작동하지 않는 경우에도 실행할 서브 테스트를 정확하게 지정할 수 있습니다. 단, 이 매개변수화 함수는 서브 테스트의 이름 집합을 약간 다르게 생성합니다. 다음과 같은 모습입니다: ```bash pytest test_this2.py --collect-only -q ``` 그리고 다음의 내용을 확인할 수 있을 것입니다: ```bash test_this2.py::test_floor[integer-1-1.0] test_this2.py::test_floor[negative--1.5--2.0] test_this2.py::test_floor[large fraction-1.6-1] ``` 특정한 테스트에 대해서만 실행할 수도 있습니다: ```bash pytest test_this2.py::test_floor[negative--1.5--2.0] test_this2.py::test_floor[integer-1-1.0] ``` 이전의 예시와 같이 실행할 수 있습니다. ### 파일 및 디렉터리[[files-and-directories]] 테스트에서 종종 현재 테스트 파일과 관련된 상대적인 위치를 알아야 하는 경우가 있습니다. 
테스트가 여러 디렉터리에서 호출되거나 깊이가 다른 하위 디렉터리에 있을 수 있기 때문에 그 위치를 아는 것은 간단하지 않습니다. `transformers.test_utils.TestCasePlus`라는 헬퍼 클래스는 모든 기본 경로를 처리하고 간단한 액세서를 제공하여 이 문제를 해결합니다: - `pathlib` 객체(완전히 정해진 경로) - `test_file_path` - 현재 테스트 파일 경로 (예: `__file__`) - test_file_dir` - 현재 테스트 파일이 포함된 디렉터리 - tests_dir` - `tests` 테스트 스위트의 디렉터리 - examples_dir` - `examples` 테스트 스위트의 디렉터리 - repo_root_dir` - 저장소 디렉터리 - src_dir` - `src`의 디렉터리(예: `transformers` 하위 디렉터리가 있는 곳) - 문자열로 변환된 경로---위와 동일하지만, `pathlib` 객체가 아닌 문자열로 경로를 반환합니다: - `test_file_path_str` - `test_file_dir_str` - `tests_dir_str` - `examples_dir_str` - `repo_root_dir_str` - `src_dir_str` 위의 내용을 사용하려면 테스트가 'transformers.test_utils.TestCasePlus'의 서브클래스에 있는지 확인해야 합니다. 예를 들어 다음과 같습니다: ```python from transformers.testing_utils import TestCasePlus class PathExampleTest(TestCasePlus): def test_something_involving_local_locations(self): data_dir = self.tests_dir / "fixtures/tests_samples/wmt_en_ro" ``` 만약 `pathlib`를 통해 경로를 조작할 필요가 없거나 경로를 문자열로만 필요로 하는 경우에는 `pathlib` 객체에 `str()`을 호출하거나 `_str`로 끝나는 접근자를 사용할 수 있습니다. 예를 들어 다음과 같습니다: ```python from transformers.testing_utils import TestCasePlus class PathExampleTest(TestCasePlus): def test_something_involving_stringified_locations(self): examples_dir = self.examples_dir_str ``` ### 임시 파일 및 디렉터리[[temporary-files-and-directories]] 고유한 임시 파일 및 디렉터리를 사용하는 것은 병렬 테스트 실행에 있어 필수적입니다. 이렇게 함으로써 테스트들이 서로의 데이터를 덮어쓰지 않게 할 수 있습니다. 또한 우리는 생성된 테스트의 종료 단계에서 이러한 임시 파일 및 디렉터리를 제거하고 싶습니다. 따라서 이러한 요구 사항을 충족시켜주는 `tempfile`과 같은 패키지를 사용하는 것이 중요합니다. 그러나 테스트를 디버깅할 때는 임시 파일이나 디렉터리에 들어가는 내용을 확인할 수 있어야 하며, 재실행되는 각 테스트마다 임시 파일이나 디렉터리의 경로에 대해 무작위 값이 아닌 정확한 값을 알고 싶을 것입니다. `transformers.test_utils.TestCasePlus`라는 도우미 클래스는 이러한 목적에 가장 적합합니다. 이 클래스는 `unittest.TestCase`의 하위 클래스이므로, 우리는 이것을 테스트 모듈에서 쉽게 상속할 수 있습니다. 다음은 해당 클래스를 사용하는 예시입니다: ```python from transformers.testing_utils import TestCasePlus class ExamplesTests(TestCasePlus): def test_whatever(self): tmp_dir = self.get_auto_remove_tmp_dir() ``` 이 코드는 고유한 임시 디렉터리를 생성하고 `tmp_dir`을 해당 위치로 설정합니다. - 고유한 임시 디렉터리를 생성합니다: ```python def test_whatever(self): tmp_dir = self.get_auto_remove_tmp_dir() ``` `tmp_dir`에는 생성된 임시 디렉터리의 경로가 포함됩니다. 이는 테스트의 종료 단계에서 자동으로 제거됩니다. - 선택한 경로로 임시 디렉터리 생성 후에 테스트 시작 전에 비어 있는 상태인지 확인하고, 테스트 후에는 비우지 마세요. ```python def test_whatever(self): tmp_dir = self.get_auto_remove_tmp_dir("./xxx") ``` 이것은 디버깅할 때 특정 디렉터리를 모니터링하고, 그 디렉터리에 이전에 실행된 테스트가 데이터를 남기지 않도록 하는 데에 유용합니다. - `before` 및 `after` 인수를 직접 오버라이딩하여 기본 동작을 변경할 수 있으며 다음 중 하나의 동작으로 이어집니다: - `before=True`: 테스트 시작 시 임시 디렉터리가 항상 지워집니다. - `before=False`: 임시 디렉터리가 이미 존재하는 경우 기존 파일은 그대로 남습니다. - `after=True`: 테스트 종료 시 임시 디렉터리가 항상 삭제됩니다. - `after=False`: 테스트 종료 시 임시 디렉터리가 항상 그대로 유지됩니다. <Tip> `rm -r`에 해당하는 명령을 안전하게 실행하기 위해, 명시적인 `tmp_dir`을 사용하는 경우 프로젝트 저장소 체크 아웃의 하위 디렉터리만 허용됩니다. 따라서 실수로 `/tmp`가 아닌 중요한 파일 시스템의 일부가 삭제되지 않도록 항상 `./`로 시작하는 경로를 전달해야 합니다. </Tip> <Tip> 각 테스트는 여러 개의 임시 디렉터리를 등록할 수 있으며, 별도로 요청하지 않는 한 모두 자동으로 제거됩니다. </Tip> ### 임시 sys.path 오버라이드[[temporary-sys.path-override]] `sys.path`를 다른 테스트로 임시로 오버라이드하기 위해 예를 들어 `ExtendSysPath` 컨텍스트 관리자를 사용할 수 있습니다. 예를 들어 다음과 같습니다: ```python import os from transformers.testing_utils import ExtendSysPath bindir = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(f"{bindir}/.."): from test_trainer import TrainerIntegrationCommon # noqa ``` ### 테스트 건너뛰기[[skipping-tests]] 이것은 버그가 발견되어 새로운 테스트가 작성되었지만 아직 그 버그가 수정되지 않은 경우에 유용합니다. 이 테스트를 주 저장소에 커밋하려면 `make test` 중에 건너뛰도록 해야 합니다. 방법: - **skip**은 테스트가 일부 조건이 충족될 경우에만 통과될 것으로 예상되고, 그렇지 않으면 pytest가 전체 테스트를 건너뛰어야 함을 의미합니다. 
일반적인 예로는 Windows가 아닌 플랫폼에서 Windows 전용 테스트를 건너뛰거나 외부 리소스(예를 들어 데이터베이스)에 의존하는 테스트를 건너뛰는 것이 있습니다. - **xfail**은 테스트가 특정한 이유로 인해 실패할 것으로 예상하는 것을 의미합니다. 일반적인 예로는 아직 구현되지 않은 기능이나 아직 수정되지 않은 버그의 테스트가 있습니다. `xfail`로 표시된 테스트가 예상대로 실패하지 않고 통과된 경우, 이것은 xpass이며 테스트 결과 요약에 기록됩니다. 두 가지 중요한 차이점 중 하나는 `skip`은 테스트를 실행하지 않지만 `xfail`은 실행한다는 것입니다. 따라서 오류가 있는 코드가 일부 테스트에 영향을 미칠 수 있는 경우 `xfail`을 사용하지 마세요. #### 구현[[implementation]] - 전체 테스트를 무조건 건너뛰려면 다음과 같이 할 수 있습니다: ```python no-style @unittest.skip("this bug needs to be fixed") def test_feature_x(): ``` 또는 pytest를 통해: ```python no-style @pytest.mark.skip(reason="this bug needs to be fixed") ``` 또는 `xfail` 방식으로: ```python no-style @pytest.mark.xfail def test_feature_x(): ``` - 테스트 내부에서 내부 확인에 따라 테스트를 건너뛰는 방법은 다음과 같습니다: ```python def test_feature_x(): if not has_something(): pytest.skip("unsupported configuration") ``` 또는 모듈 전체: ```python import pytest if not pytest.config.getoption("--custom-flag"): pytest.skip("--custom-flag is missing, skipping tests", allow_module_level=True) ``` 또는 `xfail` 방식으로: ```python def test_feature_x(): pytest.xfail("expected to fail until bug XYZ is fixed") ``` - import가 missing된 모듈이 있을 때 그 모듈의 모든 테스트를 건너뛰는 방법: ```python docutils = pytest.importorskip("docutils", minversion="0.3") ``` - 조건에 따라 테스트를 건너뛰는 방법: ```python no-style @pytest.mark.skipif(sys.version_info < (3,6), reason="requires python3.6 or higher") def test_feature_x(): ``` 또는: ```python no-style @unittest.skipIf(torch_device == "cpu", "Can't do half precision") def test_feature_x(): ``` 또는 모듈 전체를 건너뛰는 방법: ```python no-style @pytest.mark.skipif(sys.platform == 'win32', reason="does not run on windows") class TestClass(): def test_feature_x(self): ``` 보다 자세한 예제 및 방법은 [여기](https://docs.pytest.org/en/latest/skipping.html)에서 확인할 수 있습니다. ### 느린 테스트[[slow-tests]] 테스트 라이브러리는 지속적으로 확장되고 있으며, 일부 테스트는 실행하는 데 몇 분이 걸립니다. 그리고 우리에게는 테스트 스위트가 CI를 통해 완료되기까지 한 시간을 기다릴 여유가 없습니다. 따라서 필수 테스트를 위한 일부 예외를 제외하고 느린 테스트는 다음과 같이 표시해야 합니다. ```python no-style from transformers.testing_utils import slow @slow def test_integration_foo(): ``` `@slow`로 표시된 테스트를 실행하려면 `RUN_SLOW=1` 환경 변수를 설정하세요. 예를 들어 다음과 같습니다: ```bash RUN_SLOW=1 pytest tests ``` `@parameterized`와 같은 몇 가지 데코레이터는 테스트 이름을 다시 작성합니다. 그러므로 `@slow`와 나머지 건너뛰기 데코레이터 `@require_*`가 올바르게 작동되려면 마지막에 나열되어야 합니다. 다음은 올바른 사용 예입니다. ```python no-style @parameterized.expand(...) @slow def test_integration_foo(): ``` 이 문서의 초반부에 설명된 것처럼 느린 테스트는 PR의 CI 확인이 아닌 예약된 일정 기반으로 실행됩니다. 따라서 PR 제출 중에 일부 문제를 놓친 채로 병합될 수 있습니다. 이러한 문제들은 다음번의 예정된 CI 작업 중에 감지됩니다. 하지만 PR을 제출하기 전에 자신의 컴퓨터에서 느린 테스트를 실행하는 것 또한 중요합니다. 느린 테스트로 표시해야 하는지 여부를 결정하는 대략적인 결정 기준은 다음과 같습니다. 만약 테스트가 라이브러리의 내부 구성 요소 중 하나에 집중되어 있다면(예: 모델링 파일, 토큰화 파일, 파이프라인), 해당 테스트를 느린 테스트 스위트에서 실행해야 합니다. 만약 라이브러리의 다른 측면(예: 문서 또는 예제)에 집중되어 있다면, 해당 테스트를 느린 테스트 스위트에서 실행해야 합니다. 그리고 이 접근 방식을 보완하기 위해 예외를 만들어야 합니다. - 무거운 가중치 세트나 50MB보다 큰 데이터셋을 다운로드해야 하는 모든 테스트(예: 모델 통합 테스트, 토크나이저 통합 테스트, 파이프라인 통합 테스트)를 느린 테스트로 설정해야 합니다. 새로운 모델을 추가하는 경우 통합 테스트용으로 무작위 가중치로 작은 버전을 만들어 허브에 업로드해야 합니다. 이 내용은 아래 단락에서 설명됩니다. - 특별히 빠르게 실행되도록 최적화되지 않은 학습을 수행해야 하는 테스트는 느린 테스트로 설정해야 합니다. - 느리지 않아야 할 테스트 중 일부가 극도로 느린 경우 예외를 도입하고 이를 `@slow`로 설정할 수 있습니다. 대용량 파일을 디스크에 저장하고 불러오는 자동 모델링 테스트는 `@slow`으로 표시된 테스트의 좋은 예입니다. - CI에서 1초 이내에 테스트가 완료되는 경우(다운로드 포함)에는 느린 테스트가 아니어야 합니다. 느린 테스트가 아닌 경우에는 다양한 내부를 완전히 커버하면서 빠르게 유지되어야 합니다. 예를 들어, 무작위 가중치를 사용하여 특별히 생성된 작은 모델로 테스트하면 상당한 커버리지를 얻을 수 있습니다. 이러한 모델은 최소한의 레이어 수(예: 2), 어휘 크기(예: 1000) 등의 요소만 가집니다. 그런 다음 `@slow` 테스트는 대형 느린 모델을 사용하여 정성적인 테스트를 수행할 수 있습니다. 이러한 작은 모델을 사용하는 방법을 확인하려면 다음과 같이 *tiny* 모델을 찾아보세요. 
```bash grep tiny tests examples ``` 다음은 작은 모델[stas/tiny-wmt19-en-de](https://huggingface.co/stas/tiny-wmt19-en-de)을 만든 [script](https://github.com/huggingface/transformers/tree/main/scripts/fsmt/fsmt-make-tiny-model.py) 예시입니다. 특정 모델의 아키텍처에 맞게 쉽게 조정할 수 있습니다. 예를 들어 대용량 모델을 다운로드하는 경우 런타임을 잘못 측정하기 쉽지만, 로컬에서 테스트하면 다운로드한 파일이 캐시되어 다운로드 시간이 측정되지 않습니다. 대신 CI 로그의 실행 속도 보고서를 확인하세요(`pytest --durations=0 tests`의 출력). 이 보고서는 느린 이상값으로 표시되지 않거나 빠르게 다시 작성해야 하는 느린 이상값을 찾는 데도 유용합니다. CI에서 테스트 스위트가 느려지기 시작하면 이 보고서의 맨 위 목록에 가장 느린 테스트가 표시됩니다. ### stdout/stderr 출력 테스트[[testing-the-stdout/stderr-output]] `stdout` 및/또는 `stderr`로 쓰는 함수를 테스트하려면 `pytest`의 [capsys 시스템](https://docs.pytest.org/en/latest/capture.html)을 사용하여 해당 스트림에 액세스할 수 있습니다. 다음과 같이 수행할 수 있습니다. ```python import sys def print_to_stdout(s): print(s) def print_to_stderr(s): sys.stderr.write(s) def test_result_and_stdout(capsys): msg = "Hello" print_to_stdout(msg) print_to_stderr(msg) out, err = capsys.readouterr() # 캡처된 출력 스트림 사용 # 선택 사항: 캡처된 스트림 재생성 sys.stdout.write(out) sys.stderr.write(err) # 테스트: assert msg in out assert msg in err ``` 그리고, 물론 대부분의 경우에는 `stderr`는 예외의 일부로 제공됩니다. 그러므로 해당 경우에는 try/except를 사용해야 합니다. ```python def raise_exception(msg): raise ValueError(msg) def test_something_exception(): msg = "Not a good value" error = "" try: raise_exception(msg) except Exception as e: error = str(e) assert msg in error, f"{msg} is in the exception:\n{error}" ``` `stdout`를 캡처하는 또 다른 방법은 `contextlib.redirect_stdout`를 사용하는 것입니다. ```python from io import StringIO from contextlib import redirect_stdout def print_to_stdout(s): print(s) def test_result_and_stdout(): msg = "Hello" buffer = StringIO() with redirect_stdout(buffer): print_to_stdout(msg) out = buffer.getvalue() # 선택 사항: 캡처된 스트림 재생성 sys.stdout.write(out) # 테스트: assert msg in out ``` `stdout` 캡처에 관련된 중요한 문제 중 하나는 보통 `print`에서 이전에 인쇄된 내용을 재설정하는 `\r` 문자가 포함될 수 있다는 것입니다. `pytest`에서는 문제가 없지만 `pytest -s`에서는 이러한 문자가 버퍼에 포함되므로 `-s`가 있거나 없는 상태에서 태스트를 수행할 수 있으려면 캡처된 출력에 대해 추가적인 정리가 필요합니다. 이 경우에는 `re.sub(r'~.*\r', '', buf, 0, re.M)`을 사용할 수 있습니다. 하지만 도우미 컨텍스트 관리자 래퍼를 사용하면 출력에 `\r`이 포함되어 있는지의 여부에 관계없이 모든 것을 자동으로 처리하므로 편리합니다. ```python from transformers.testing_utils import CaptureStdout with CaptureStdout() as cs: function_that_writes_to_stdout() print(cs.out) ``` 다음은 전체 테스트 예제입니다. ```python from transformers.testing_utils import CaptureStdout msg = "Secret message\r" final = "Hello World" with CaptureStdout() as cs: print(msg + final) assert cs.out == final + "\n", f"captured: {cs.out}, expecting {final}" ``` `stderr`를 캡처하고 싶다면, 대신 `CaptureStderr` 클래스를 사용하세요. ```python from transformers.testing_utils import CaptureStderr with CaptureStderr() as cs: function_that_writes_to_stderr() print(cs.err) ``` 두 스트림을 동시에 캡처해야 한다면, 부모 `CaptureStd` 클래스를 사용하세요. ```python from transformers.testing_utils import CaptureStd with CaptureStd() as cs: function_that_writes_to_stdout_and_stderr() print(cs.err, cs.out) ``` 또한, 테스트의 디버깅을 지원하기 위해 이러한 컨텍스트 관리자는 기본적으로 컨텍스트에서 종료할 때 캡처된 스트림을 자동으로 다시 실행합니다. ### 로거 스트림 캡처[[capturing-logger-stream]] 로거 출력을 검증해야 하는 경우 `CaptureLogger`를 사용할 수 있습니다. ```python from transformers import logging from transformers.testing_utils import CaptureLogger msg = "Testing 1, 2, 3" logging.set_verbosity_info() logger = logging.get_logger("transformers.models.bart.tokenization_bart") with CaptureLogger(logger) as cl: logger.info(msg) assert cl.out, msg + "\n" ``` ### 환경 변수를 이용하여 테스트[[testing-with-environment-variables]] 특정 테스트의 환경 변수 영향을 검증하려면 `transformers.testing_utils.mockenv`라는 도우미 데코레이터를 사용할 수 있습니다. 
```python from transformers.testing_utils import mockenv class HfArgumentParserTest(unittest.TestCase): @mockenv(TRANSFORMERS_VERBOSITY="error") def test_env_override(self): env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None) ``` 일부 경우에는 외부 프로그램을 호출해야할 수도 있는데, 이 때에는 여러 개의 로컬 경로를 포함하는 `os.environ`에서 `PYTHONPATH`의 설정이 필요합니다. 헬퍼 클래스 `transformers.test_utils.TestCasePlus`가 도움이 됩니다: ```python from transformers.testing_utils import TestCasePlus class EnvExampleTest(TestCasePlus): def test_external_prog(self): env = self.get_env() # 이제 `env`를 사용하여 외부 프로그램 호출 ``` 테스트 파일이 `tests` 테스트 스위트 또는 `examples`에 있는지에 따라 `env[PYTHONPATH]`가 두 디렉터리 중 하나를 포함하도록 설정되며, 현재 저장소에 대해 테스트가 수행되도록 `src` 디렉터리도 포함됩니다. 테스트 호출 이전에 설정된 경우에는 `env[PYTHONPATH]`를 그대로 사용합니다. 이 헬퍼 메소드는 `os.environ` 객체의 사본을 생성하므로 원본은 그대로 유지됩니다. ### 재현 가능한 결과 얻기[[getting-reproducible-results]] 일부 상황에서 테스트에서 임의성을 제거하여 동일하게 재현 가능한 결과를 얻고 싶을 수 있습니다. 이를 위해서는 다음과 같이 시드를 고정해야 합니다. ```python seed = 42 # 파이썬 RNG import random random.seed(seed) # 파이토치 RNG import torch torch.manual_seed(seed) torch.backends.cudnn.deterministic = True if torch.cuda.is_available(): torch.cuda.manual_seed_all(seed) # 넘파이 RNG import numpy as np np.random.seed(seed) # 텐서플로 RNG tf.random.set_seed(seed) ``` ### 테스트 디버깅[[debugging tests]] 경고가 있는 곳에서 디버거를 시작하려면 다음을 수행하세요. ```bash pytest tests/utils/test_logging.py -W error::UserWarning --pdb ``` ## Github Actions 워크플로우 작업 처리[[working-with-github-actions-workflows]] 셀프 푸시 워크플로우 CI 작업을 트리거하려면, 다음을 수행해야 합니다. 1. `transformers` 원본에서 새 브랜치를 만듭니다(포크가 아닙니다!). 2. 브랜치 이름은 `ci_` 또는 `ci-`로 시작해야 합니다(`main`도 트리거하지만 `main`에서는 PR을 할 수 없습니다). 또한 특정 경로에 대해서만 트리거되므로 이 문서가 작성된 후에 변경된 내용은 [여기](https://github.com/huggingface/transformers/blob/main/.github/workflows/self-push.yml)의 *push:*에서 확인할 수 있습니다. 3. 이 브랜치에서 PR을 생성합니다 4. 그런 다음 [여기](https://github.com/huggingface/transformers/actions/workflows/self-push.yml)에서 작업이 나타나는지 확인할 수 있습니다. 백로그가 있는 경우, 바로 실행되지 않을 수도 있습니다. ## 실험적인 CI 기능 테스트[[testing-Experimental-CI-Features]] CI 기능을 테스트하는 것은 일반 CI 작동에 방해가 될 수 있기 때문에 잠재적으로 문제가 발생할 수 있습니다. 따라서 새로운 CI 기능을 추가하는 경우 다음과 같이 수행해야 합니다. 1. 테스트해야 할 내용을 테스트하는 새로운 전용 작업을 생성합니다. 2. 새로운 작업은 항상 성공해야만 녹색 ✓를 받을 수 있습니다(아래에 자세한 내용이 있습니다). 3. 다양한 PR 유형에 대한 확인을 위해 (사용자 포크 브랜치, 포크되지 않은 브랜치, github.com UI 직접 파일 편집에서 생성된 브랜치, 강제 푸시 등 PR의 유형은 아주 다양합니다.) 며칠 동안 실험 작업의 로그를 모니터링하면서 실행해봅니다. (의도적으로 항상 녹색을 표시하므로 작업 전체가 녹색은 아니라는 점에 유의합니다.) 4. 모든 것이 안정적인지 확인한 후, 새로운 변경 사항을 기존 작업에 병합합니다. 이렇게 하면 CI 기능 자체에 대한 실험이 일반 작업 흐름에 방해가 되지 않습니다. 그러나 새로운 CI 기능이 개발 중인 동안, 항상 성공하도록 할 수 있는 방법은 무엇일까요? TravisCI와 같은 일부 CI는 `ignore-step-failure`를 지원하며 전체 작업을 성공한 것으로 보고하지만, 현재 우리가 사용하는 CircleCI와 Github Actions는 이를 지원하지 않습니다. 따라서 다음과 같은 해결책을 사용할 수 있습니다. 1. bash 스크립트에서 가능한 많은 오류를 억제하기 위해 실행 명령의 시작 부분에 `set +euo pipefail`을 추가합니다. 2. 마지막 명령은 반드시 성공해야 합니다. `echo "done"` 또는 `true`를 사용하면 됩니다. 예시는 다음과 같습니다. ```yaml - run: name: run CI experiment command: | set +euo pipefail echo "setting run-all-despite-any-errors-mode" this_command_will_fail echo "but bash continues to run" # emulate another failure false # but the last command must be a success echo "during experiment do not remove: reporting success to CI, even if there were failures" ``` 간단한 명령의 경우 다음과 같이 수행할 수도 있습니다. ```bash cmd_that_may_fail || true ``` 결과에 만족한 후에는 물론, 실험적인 단계 또는 작업을 일반 작업의 나머지 부분과 통합하면서 `set +euo pipefail` 또는 기타 추가한 요소를 제거하여 실험 작업이 일반 CI 작동에 방해되지 않도록 해야 합니다. 이 전반적인 과정은 실험 단계가 PR의 전반적인 상태에 영향을 주지 않고 실패하도록 `allow-failure`와 같은 기능을 설정할 수 있다면 훨씬 더 쉬웠을 것입니다. 그러나 앞에서 언급한 바와 같이 CircleCI와 Github Actions는 현재 이러한 기능들 지원하지 않습니다. 
이 기능의 지원을 위한 투표에 참여하고 CI 관련 스레드들에서 이러한 상황을 확인할 수도 있습니다.

- [GitHub Actions](https://github.com/actions/toolkit/issues/399)
- [CircleCI](https://ideas.circleci.com/ideas/CCI-I-344)
transformers/docs/source/ko/testing.md/0
{ "file_path": "transformers/docs/source/ko/testing.md", "repo_id": "transformers", "token_count": 35298 }
280
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Usando os Tokenizers do 🤗 Tokenizers

O [`PreTrainedTokenizerFast`] depende da biblioteca [🤗 Tokenizers](https://huggingface.co/docs/tokenizers). O tokenizer obtido da biblioteca 🤗 Tokenizers pode ser carregado facilmente pelo 🤗 Transformers.

Antes de entrar nos detalhes, vamos começar criando um tokenizer fictício em algumas linhas:

```python
>>> from tokenizers import Tokenizer
>>> from tokenizers.models import BPE
>>> from tokenizers.trainers import BpeTrainer
>>> from tokenizers.pre_tokenizers import Whitespace

>>> tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
>>> trainer = BpeTrainer(special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"])

>>> tokenizer.pre_tokenizer = Whitespace()
>>> files = [...]
>>> tokenizer.train(files, trainer)
```

Agora temos um tokenizer treinado nos arquivos que foram definidos. Podemos continuar a usá-lo nesta execução ou salvá-lo em um arquivo JSON para reutilizá-lo no futuro.

## Carregando diretamente de um objeto tokenizer

Vamos ver como aproveitar esse objeto tokenizer na biblioteca 🤗 Transformers. A classe [`PreTrainedTokenizerFast`] permite uma instanciação fácil, aceitando o objeto *tokenizer* instanciado como um argumento:

```python
>>> from transformers import PreTrainedTokenizerFast

>>> fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer)
```

Esse objeto pode ser utilizado com todos os métodos compartilhados pelos tokenizers dos 🤗 Transformers! Vá para [a página do tokenizer](main_classes/tokenizer) para mais informações.

## Carregando de um arquivo JSON

Para carregar um tokenizer de um arquivo JSON, vamos primeiro começar salvando nosso tokenizer:

```python
>>> tokenizer.save("tokenizer.json")
```

O caminho para o qual salvamos esse arquivo pode ser passado para o método de inicialização do [`PreTrainedTokenizerFast`] usando o parâmetro `tokenizer_file`:

```python
>>> from transformers import PreTrainedTokenizerFast

>>> fast_tokenizer = PreTrainedTokenizerFast(tokenizer_file="tokenizer.json")
```

Esse objeto pode ser utilizado com todos os métodos compartilhados pelos tokenizers dos 🤗 Transformers! Vá para [a página do tokenizer](main_classes/tokenizer) para mais informações.
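Como ilustração (a frase de exemplo abaixo é hipotética e os ids resultantes dependem do corpus usado no treinamento), o tokenizer rápido carregado acima pode ser usado como qualquer outro tokenizer do 🤗 Transformers para codificar e decodificar texto:

```python
from transformers import PreTrainedTokenizerFast

# Carrega o tokenizer salvo anteriormente em "tokenizer.json"
fast_tokenizer = PreTrainedTokenizerFast(tokenizer_file="tokenizer.json")

# Codifica uma frase de exemplo (meramente ilustrativa)
encoding = fast_tokenizer("Olá, como você está?")

print(encoding["input_ids"])  # ids dos tokens (dependem do vocabulário treinado)
print(encoding.tokens())  # tokens correspondentes, como strings

# Decodifica os ids de volta para texto
print(fast_tokenizer.decode(encoding["input_ids"]))
```

Assim como os demais tokenizers, o objeto retornado também inclui, por exemplo, a `attention_mask`, e pode ser usado diretamente como entrada de um modelo.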
transformers/docs/source/pt/fast_tokenizers.md/0
{ "file_path": "transformers/docs/source/pt/fast_tokenizers.md", "repo_id": "transformers", "token_count": 937 }
281
- sections: - local: index title: 🤗 Transformers 简介 - local: quicktour title: 快速上手 - local: installation title: 安装 title: 开始使用 - sections: - local: pipeline_tutorial title: 使用pipelines进行推理 - local: autoclass_tutorial title: 使用AutoClass编写可移植的代码 - local: preprocessing title: 预处理数据 - local: training title: 微调预训练模型 - local: run_scripts title: 通过脚本训练模型 - local: accelerate title: 使用🤗Accelerate进行分布式训练 - local: peft title: 使用🤗 PEFT加载和训练adapters - local: model_sharing title: 分享您的模型 - local: transformers_agents title: agents教程 - local: llm_tutorial title: 使用LLMs进行生成 title: 教程 - sections: - isExpanded: false sections: - local: tasks/asr title: 自动语音识别 - sections: - local: fast_tokenizers title: 使用 🤗 Tokenizers 中的分词器 - local: multilingual title: 使用多语言模型进行推理 - local: create_a_model title: 使用特定于模型的 API - local: custom_models title: 共享自定义模型 - local: chat_templating title: 聊天模型的模板 - local: serialization title: 导出为 ONNX - local: tflite title: 导出为 TFLite - local: torchscript title: 导出为 TorchScript title: 开发者指南 - sections: - local: performance title: 综述 - sections: - local: fsdp title: 完全分片数据并行 - local: perf_hardware title: 用于训练的定制硬件 - local: hpo_train title: 使用Trainer API 进行超参数搜索 title: 高效训练技术 - local: big_models title: 实例化大模型 - local: debugging title: 问题定位及解决 - local: tf_xla title: TensorFlow模型的XLA集成 - local: perf_torch_compile title: 使用 `torch.compile()` 优化推理 title: 性能和可扩展性 - sections: - local: contributing title: 如何为 🤗 Transformers 做贡献? - local: add_new_pipeline title: 如何将流水线添加到 🤗 Transformers? title: 贡献 - sections: - local: task_summary title: 🤗Transformers能做什么 - local: tokenizer_summary title: 分词器的摘要 title: 概念指南 - sections: - sections: - local: main_classes/agent title: Agents和工具 - local: main_classes/callback title: Callbacks - local: main_classes/configuration title: Configuration - local: main_classes/data_collator title: Data Collator - local: main_classes/keras_callbacks title: Keras callbacks - local: main_classes/logging title: Logging - local: main_classes/model title: 模型 - local: main_classes/text_generation title: 文本生成 - local: main_classes/onnx title: ONNX - local: main_classes/optimizer_schedules title: Optimization - local: main_classes/output title: 模型输出 - local: main_classes/pipelines title: Pipelines - local: main_classes/processors title: Processors - local: main_classes/quantization title: Quantization - local: main_classes/tokenizer title: Tokenizer - local: main_classes/trainer title: Trainer - local: main_classes/deepspeed title: DeepSpeed集成 - local: main_classes/feature_extractor title: Feature Extractor - local: main_classes/image_processor title: Image Processor title: 主要类 - sections: - local: internal/modeling_utils title: 自定义层和工具 - local: internal/pipelines_utils title: pipelines工具 - local: internal/tokenization_utils title: Tokenizers工具 - local: internal/trainer_utils title: 训练器工具 - local: internal/generation_utils title: 生成工具 - local: internal/image_processing_utils title: 图像处理工具 - local: internal/audio_utils title: 音频处理工具 - local: internal/file_utils title: 通用工具 - local: internal/time_series_utils title: 时序数据工具 title: 内部辅助工具 title: 应用程序接口 (API)
transformers/docs/source/zh/_toctree.yml/0
{ "file_path": "transformers/docs/source/zh/_toctree.yml", "repo_id": "transformers", "token_count": 2046 }
282
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 使用 torch.compile() 优化推理 本指南旨在为使用[`torch.compile()`](https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html)在[🤗 Transformers中的计算机视觉模型](https://huggingface.co/models?pipeline_tag=image-classification&library=transformers&sort=trending)中引入的推理速度提升提供一个基准。 ## torch.compile 的优势 根据模型和GPU的不同,`torch.compile()`在推理过程中可以提高多达30%的速度。要使用`torch.compile()`,只需安装2.0及以上版本的`torch`即可。 编译模型需要时间,因此如果您只需要编译一次模型而不是每次推理都编译,那么它非常有用。 要编译您选择的任何计算机视觉模型,请按照以下方式调用`torch.compile()`: ```diff from transformers import AutoModelForImageClassification model = AutoModelForImageClassification.from_pretrained(MODEL_ID).to("cuda") + model = torch.compile(model) ``` `compile()` 提供了多种编译模式,它们在编译时间和推理开销上有所不同。`max-autotune` 比 `reduce-overhead` 需要更长的时间,但会得到更快的推理速度。默认模式在编译时最快,但在推理时间上与 `reduce-overhead` 相比效率较低。在本指南中,我们使用了默认模式。您可以在[这里](https://pytorch.org/get-started/pytorch-2.0/#user-experience)了解更多信息。 我们在 PyTorch 2.0.1 版本上使用不同的计算机视觉模型、任务、硬件类型和数据批量大小对 `torch.compile` 进行了基准测试。 ## 基准测试代码 以下是每个任务的基准测试代码。我们在推理之前”预热“GPU,并取300次推理的平均值,每次使用相同的图像。 ### 使用 ViT 进行图像分类 ```python import torch from PIL import Image import requests import numpy as np from transformers import AutoImageProcessor, AutoModelForImageClassification url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224") model = AutoModelForImageClassification.from_pretrained("google/vit-base-patch16-224").to("cuda") model = torch.compile(model) processed_input = processor(image, return_tensors='pt').to(device="cuda") with torch.no_grad(): _ = model(**processed_input) ``` #### 使用 DETR 进行目标检测 ```python from transformers import AutoImageProcessor, AutoModelForObjectDetection processor = AutoImageProcessor.from_pretrained("facebook/detr-resnet-50") model = AutoModelForObjectDetection.from_pretrained("facebook/detr-resnet-50").to("cuda") model = torch.compile(model) texts = ["a photo of a cat", "a photo of a dog"] inputs = processor(text=texts, images=image, return_tensors="pt").to("cuda") with torch.no_grad(): _ = model(**inputs) ``` #### 使用 Segformer 进行图像分割 ```python from transformers import SegformerImageProcessor, SegformerForSemanticSegmentation processor = SegformerImageProcessor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512") model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to("cuda") model = torch.compile(model) seg_inputs = processor(images=image, return_tensors="pt").to("cuda") with torch.no_grad(): _ = model(**seg_inputs) ``` 以下是我们进行基准测试的模型列表。 **图像分类** - [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) - [microsoft/beit-base-patch16-224-pt22k-ft22k](https://huggingface.co/microsoft/beit-base-patch16-224-pt22k-ft22k) - 
[facebook/convnext-large-224](https://huggingface.co/facebook/convnext-large-224) - [microsoft/resnet-50](https://huggingface.co/) **图像分割** - [nvidia/segformer-b0-finetuned-ade-512-512](https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512) - [facebook/mask2former-swin-tiny-coco-panoptic](https://huggingface.co/facebook/mask2former-swin-tiny-coco-panoptic) - [facebook/maskformer-swin-base-ade](https://huggingface.co/facebook/maskformer-swin-base-ade) - [google/deeplabv3_mobilenet_v2_1.0_513](https://huggingface.co/google/deeplabv3_mobilenet_v2_1.0_513) **目标检测** - [google/owlvit-base-patch32](https://huggingface.co/google/owlvit-base-patch32) - [facebook/detr-resnet-101](https://huggingface.co/facebook/detr-resnet-101) - [microsoft/conditional-detr-resnet-50](https://huggingface.co/microsoft/conditional-detr-resnet-50) 下面是使用和不使用`torch.compile()`的推理持续时间可视化,以及每个模型在不同硬件和数据批量大小下的改进百分比。 <div class="flex"> <div> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/torch_compile/a100_batch_comp.png" /> </div> <div> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/torch_compile/v100_batch_comp.png" /> </div> <div> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/torch_compile/t4_batch_comp.png" /> </div> </div> <div class="flex"> <div> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/torch_compile/A100_1_duration.png" /> </div> <div> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/torch_compile/A100_1_percentage.png" /> </div> </div> ![Duration Comparison on V100 with Batch Size of 1](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/torch_compile/v100_1_duration.png) ![Percentage Improvement on T4 with Batch Size of 4](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/torch_compile/T4_4_percentage.png) 下面可以找到每个模型使用和不使用`compile()`的推理时间(毫秒)。请注意,OwlViT在大批量大小下会导致内存溢出。 ### A100 (batch size: 1) | **Task/Model** | **torch 2.0 - <br>no compile** | **torch 2.0 - <br>compile** | |:---:|:---:|:---:| | Image Classification/ViT | 9.325 | 7.584 | | Image Segmentation/Segformer | 11.759 | 10.500 | | Object Detection/OwlViT | 24.978 | 18.420 | | Image Classification/BeiT | 11.282 | 8.448 | | Object Detection/DETR | 34.619 | 19.040 | | Image Classification/ConvNeXT | 10.410 | 10.208 | | Image Classification/ResNet | 6.531 | 4.124 | | Image Segmentation/Mask2former | 60.188 | 49.117 | | Image Segmentation/Maskformer | 75.764 | 59.487 | | Image Segmentation/MobileNet | 8.583 | 3.974 | | Object Detection/Resnet-101 | 36.276 | 18.197 | | Object Detection/Conditional-DETR | 31.219 | 17.993 | ### A100 (batch size: 4) | **Task/Model** | **torch 2.0 - <br>no compile** | **torch 2.0 - <br>compile** | |:---:|:---:|:---:| | Image Classification/ViT | 14.832 | 14.499 | | Image Segmentation/Segformer | 18.838 | 16.476 | | Image Classification/BeiT | 13.205 | 13.048 | | Object Detection/DETR | 48.657 | 32.418| | Image Classification/ConvNeXT | 22.940 | 21.631 | | Image Classification/ResNet | 6.657 | 4.268 | | Image Segmentation/Mask2former | 74.277 | 61.781 | | Image Segmentation/Maskformer | 180.700 | 159.116 | | Image Segmentation/MobileNet | 14.174 | 8.515 | | Object Detection/Resnet-101 | 68.101 | 44.998 | | Object Detection/Conditional-DETR | 56.470 | 35.552 | ### A100 (batch 
size: 16) | **Task/Model** | **torch 2.0 - <br>no compile** | **torch 2.0 - <br>compile** | |:---:|:---:|:---:| | Image Classification/ViT | 40.944 | 40.010 | | Image Segmentation/Segformer | 37.005 | 31.144 | | Image Classification/BeiT | 41.854 | 41.048 | | Object Detection/DETR | 164.382 | 161.902 | | Image Classification/ConvNeXT | 82.258 | 75.561 | | Image Classification/ResNet | 7.018 | 5.024 | | Image Segmentation/Mask2former | 178.945 | 154.814 | | Image Segmentation/Maskformer | 638.570 | 579.826 | | Image Segmentation/MobileNet | 51.693 | 30.310 | | Object Detection/Resnet-101 | 232.887 | 155.021 | | Object Detection/Conditional-DETR | 180.491 | 124.032 | ### V100 (batch size: 1) | **Task/Model** | **torch 2.0 - <br>no compile** | **torch 2.0 - <br>compile** | |:---:|:---:|:---:| | Image Classification/ViT | 10.495 | 6.00 | | Image Segmentation/Segformer | 13.321 | 5.862 | | Object Detection/OwlViT | 25.769 | 22.395 | | Image Classification/BeiT | 11.347 | 7.234 | | Object Detection/DETR | 33.951 | 19.388 | | Image Classification/ConvNeXT | 11.623 | 10.412 | | Image Classification/ResNet | 6.484 | 3.820 | | Image Segmentation/Mask2former | 64.640 | 49.873 | | Image Segmentation/Maskformer | 95.532 | 72.207 | | Image Segmentation/MobileNet | 9.217 | 4.753 | | Object Detection/Resnet-101 | 52.818 | 28.367 | | Object Detection/Conditional-DETR | 39.512 | 20.816 | ### V100 (batch size: 4) | **Task/Model** | **torch 2.0 - <br>no compile** | **torch 2.0 - <br>compile** | |:---:|:---:|:---:| | Image Classification/ViT | 15.181 | 14.501 | | Image Segmentation/Segformer | 16.787 | 16.188 | | Image Classification/BeiT | 15.171 | 14.753 | | Object Detection/DETR | 88.529 | 64.195 | | Image Classification/ConvNeXT | 29.574 | 27.085 | | Image Classification/ResNet | 6.109 | 4.731 | | Image Segmentation/Mask2former | 90.402 | 76.926 | | Image Segmentation/Maskformer | 234.261 | 205.456 | | Image Segmentation/MobileNet | 24.623 | 14.816 | | Object Detection/Resnet-101 | 134.672 | 101.304 | | Object Detection/Conditional-DETR | 97.464 | 69.739 | ### V100 (batch size: 16) | **Task/Model** | **torch 2.0 - <br>no compile** | **torch 2.0 - <br>compile** | |:---:|:---:|:---:| | Image Classification/ViT | 52.209 | 51.633 | | Image Segmentation/Segformer | 61.013 | 55.499 | | Image Classification/BeiT | 53.938 | 53.581 | | Object Detection/DETR | OOM | OOM | | Image Classification/ConvNeXT | 109.682 | 100.771 | | Image Classification/ResNet | 14.857 | 12.089 | | Image Segmentation/Mask2former | 249.605 | 222.801 | | Image Segmentation/Maskformer | 831.142 | 743.645 | | Image Segmentation/MobileNet | 93.129 | 55.365 | | Object Detection/Resnet-101 | 482.425 | 361.843 | | Object Detection/Conditional-DETR | 344.661 | 255.298 | ### T4 (batch size: 1) | **Task/Model** | **torch 2.0 - <br>no compile** | **torch 2.0 - <br>compile** | |:---:|:---:|:---:| | Image Classification/ViT | 16.520 | 15.786 | | Image Segmentation/Segformer | 16.116 | 14.205 | | Object Detection/OwlViT | 53.634 | 51.105 | | Image Classification/BeiT | 16.464 | 15.710 | | Object Detection/DETR | 73.100 | 53.99 | | Image Classification/ConvNeXT | 32.932 | 30.845 | | Image Classification/ResNet | 6.031 | 4.321 | | Image Segmentation/Mask2former | 79.192 | 66.815 | | Image Segmentation/Maskformer | 200.026 | 188.268 | | Image Segmentation/MobileNet | 18.908 | 11.997 | | Object Detection/Resnet-101 | 106.622 | 82.566 | | Object Detection/Conditional-DETR | 77.594 | 56.984 | ### T4 (batch size: 4) | **Task/Model** | **torch 2.0 - <br>no 
compile** | **torch 2.0 - <br>compile** | |:---:|:---:|:---:| | Image Classification/ViT | 43.653 | 43.626 | | Image Segmentation/Segformer | 45.327 | 42.445 | | Image Classification/BeiT | 52.007 | 51.354 | | Object Detection/DETR | 277.850 | 268.003 | | Image Classification/ConvNeXT | 119.259 | 105.580 | | Image Classification/ResNet | 13.039 | 11.388 | | Image Segmentation/Mask2former | 201.540 | 184.670 | | Image Segmentation/Maskformer | 764.052 | 711.280 | | Image Segmentation/MobileNet | 74.289 | 48.677 | | Object Detection/Resnet-101 | 421.859 | 357.614 | | Object Detection/Conditional-DETR | 289.002 | 226.945 | ### T4 (batch size: 16) | **Task/Model** | **torch 2.0 - <br>no compile** | **torch 2.0 - <br>compile** | |:---:|:---:|:---:| | Image Classification/ViT | 163.914 | 160.907 | | Image Segmentation/Segformer | 192.412 | 163.620 | | Image Classification/BeiT | 188.978 | 187.976 | | Object Detection/DETR | OOM | OOM | | Image Classification/ConvNeXT | 422.886 | 388.078 | | Image Classification/ResNet | 44.114 | 37.604 | | Image Segmentation/Mask2former | 756.337 | 695.291 | | Image Segmentation/Maskformer | 2842.940 | 2656.88 | | Image Segmentation/MobileNet | 299.003 | 201.942 | | Object Detection/Resnet-101 | 1619.505 | 1262.758 | | Object Detection/Conditional-DETR | 1137.513 | 897.390| ## PyTorch Nightly 我们还在 PyTorch Nightly 版本(2.1.0dev)上进行了基准测试,可以在[这里](https://download.pytorch.org/whl/nightly/cu118)找到 Nightly 版本的安装包,并观察到了未编译和编译模型的延迟性能改善。 ### A100 | **Task/Model** | **Batch Size** | **torch 2.0 - no compile** | **torch 2.0 -<br> compile** | |:---:|:---:|:---:|:---:| | Image Classification/BeiT | Unbatched | 12.462 | 6.954 | | Image Classification/BeiT | 4 | 14.109 | 12.851 | | Image Classification/BeiT | 16 | 42.179 | 42.147 | | Object Detection/DETR | Unbatched | 30.484 | 15.221 | | Object Detection/DETR | 4 | 46.816 | 30.942 | | Object Detection/DETR | 16 | 163.749 | 163.706 | ### T4 | **Task/Model** | **Batch Size** | **torch 2.0 - <br>no compile** | **torch 2.0 - <br>compile** | |:---:|:---:|:---:|:---:| | Image Classification/BeiT | Unbatched | 14.408 | 14.052 | | Image Classification/BeiT | 4 | 47.381 | 46.604 | | Image Classification/BeiT | 16 | 42.179 | 42.147 | | Object Detection/DETR | Unbatched | 68.382 | 53.481 | | Object Detection/DETR | 4 | 269.615 | 204.785 | | Object Detection/DETR | 16 | OOM | OOM | ### V100 | **Task/Model** | **Batch Size** | **torch 2.0 - <br>no compile** | **torch 2.0 - <br>compile** | |:---:|:---:|:---:|:---:| | Image Classification/BeiT | Unbatched | 13.477 | 7.926 | | Image Classification/BeiT | 4 | 15.103 | 14.378 | | Image Classification/BeiT | 16 | 52.517 | 51.691 | | Object Detection/DETR | Unbatched | 28.706 | 19.077 | | Object Detection/DETR | 4 | 88.402 | 62.949| | Object Detection/DETR | 16 | OOM | OOM | ## 降低开销 我们在 PyTorch Nightly 版本中为 A100 和 T4 进行了 `reduce-overhead` 编译模式的性能基准测试。 ### A100 | **Task/Model** | **Batch Size** | **torch 2.0 - <br>no compile** | **torch 2.0 - <br>compile** | |:---:|:---:|:---:|:---:| | Image Classification/ConvNeXT | Unbatched | 11.758 | 7.335 | | Image Classification/ConvNeXT | 4 | 23.171 | 21.490 | | Image Classification/ResNet | Unbatched | 7.435 | 3.801 | | Image Classification/ResNet | 4 | 7.261 | 2.187 | | Object Detection/Conditional-DETR | Unbatched | 32.823 | 11.627 | | Object Detection/Conditional-DETR | 4 | 50.622 | 33.831 | | Image Segmentation/MobileNet | Unbatched | 9.869 | 4.244 | | Image Segmentation/MobileNet | 4 | 14.385 | 7.946 | ### T4 | **Task/Model** | **Batch Size** | **torch 
2.0 - <br>no compile** | **torch 2.0 - <br>compile** | |:---:|:---:|:---:|:---:| | Image Classification/ConvNeXT | Unbatched | 32.137 | 31.84 | | Image Classification/ConvNeXT | 4 | 120.944 | 110.209 | | Image Classification/ResNet | Unbatched | 9.761 | 7.698 | | Image Classification/ResNet | 4 | 15.215 | 13.871 | | Object Detection/Conditional-DETR | Unbatched | 72.150 | 57.660 | | Object Detection/Conditional-DETR | 4 | 301.494 | 247.543 | | Image Segmentation/MobileNet | Unbatched | 22.266 | 19.339 | | Image Segmentation/MobileNet | 4 | 78.311 | 50.983 |
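作为参考,下面是一个与上述测量方式大致对应的简化计时脚本草稿(仅作示意:模型、预热次数和迭代次数均可按需调整,它并不是本文基准测试所使用的原始代码):

```python
import time

import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
inputs = processor(image, return_tensors="pt").to("cuda")


def benchmark(model, inputs, warmup=10, iters=300):
    # 预热:先运行若干次(对于编译后的模型,首次调用会触发编译),再开始计时
    with torch.no_grad():
        for _ in range(warmup):
            _ = model(**inputs)
        torch.cuda.synchronize()
        start = time.perf_counter()
        for _ in range(iters):
            _ = model(**inputs)
        torch.cuda.synchronize()
    # 返回单次推理的平均耗时(毫秒)
    return (time.perf_counter() - start) / iters * 1000


model = AutoModelForImageClassification.from_pretrained("google/vit-base-patch16-224").to("cuda")
print(f"eager: {benchmark(model, inputs):.3f} ms")

compiled_model = torch.compile(model)
print(f"compiled: {benchmark(compiled_model, inputs):.3f} ms")
```

实际测量时请确保比较相同的批量大小和输入尺寸,并在计时前完成 `torch.compile()` 的首次编译(上面的预热阶段即用于此目的)。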
transformers/docs/source/zh/perf_torch_compile.md/0
{ "file_path": "transformers/docs/source/zh/perf_torch_compile.md", "repo_id": "transformers", "token_count": 6786 }
283
<!--- Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # JAX/Flax Examples This folder contains actively maintained examples of 🤗 Transformers using the JAX/Flax backend. Porting models and examples to JAX/Flax is an ongoing effort, and more will be added in the coming months. In particular, these examples are all designed to run fast on Cloud TPUs, and we include step-by-step guides to getting started with Cloud TPU. *NOTE*: Currently, there is no "Trainer" abstraction for JAX/Flax -- all examples contain an explicit training loop. The following table lists all of our examples on how to use 🤗 Transformers with the JAX/Flax backend: - with information about the model and dataset used, - whether or not they leverage the [🤗 Datasets](https://github.com/huggingface/datasets) library, - links to **Colab notebooks** to walk through the scripts and run them easily. | Task | Example model | Example dataset | 🤗 Datasets | Colab |---|---|---|:---:|:---:| | [**`causal-language-modeling`**](https://github.com/huggingface/transformers/tree/main/examples/flax/language-modeling) | GPT2 | OSCAR | ✅ | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/causal_language_modeling_flax.ipynb) | [**`masked-language-modeling`**](https://github.com/huggingface/transformers/tree/main/examples/flax/language-modeling) | RoBERTa | OSCAR | ✅ | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/masked_language_modeling_flax.ipynb) | [**`text-classification`**](https://github.com/huggingface/transformers/tree/main/examples/flax/text-classification) | BERT | GLUE | ✅ | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification_flax.ipynb) ## Intro: JAX and Flax [JAX](https://github.com/google/jax) is a numerical computation library that exposes a NumPy-like API with tracing capabilities. With JAX's `jit`, you can trace pure functions and compile them into efficient, fused accelerator code on both GPU and TPU. JAX supports additional transformations such as `grad` (for arbitrary gradients), `pmap` (for parallelizing computation on multiple devices), `remat` (for gradient checkpointing), `vmap` (automatic efficient vectorization), and `pjit` (for automatically sharded model parallelism). All JAX transformations compose arbitrarily with each other -- e.g., efficiently computing per-example gradients is simply `vmap(grad(f))`. [Flax](https://github.com/google/flax) builds on top of JAX with an ergonomic module abstraction using Python dataclasses that leads to concise and explicit code. Flax's "lifted" JAX transformations (e.g. `vmap`, `remat`) allow you to nest JAX transformation and modules in any way you wish. 
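To give a concrete feel for how these transformations compose, here is a small self-contained toy sketch (it is not taken from the example scripts in this folder):

```python
import jax
import jax.numpy as jnp


def loss_fn(w, x, y):
    # mean squared error of a simple linear model
    return jnp.mean((x @ w - y) ** 2)


# XLA-compiled gradient of the loss with respect to the weights
grad_fn = jax.jit(jax.grad(loss_fn))

# per-example gradients: vmap over the batch dimension of x and y, sharing w
per_example_grads = jax.jit(jax.vmap(jax.grad(loss_fn), in_axes=(None, 0, 0)))

w = jnp.zeros(3)
x = jnp.ones((8, 3))
y = jnp.ones(8)

print(grad_fn(w, x, y).shape)            # (3,)   one averaged gradient
print(per_example_grads(w, x, y).shape)  # (8, 3) one gradient per example
```

Per-example gradients really are just `vmap(grad(f))`, and the same composability carries over to the `pmap`-based training steps used throughout these examples.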
Flax is the most widely used JAX library, with [129 dependent projects](https://github.com/google/flax/network/dependents?package_id=UGFja2FnZS01MjEyMjA2MA%3D%3D) as of May 2021. It is also the library underlying all of the official Cloud TPU JAX examples. ## Running on Cloud TPU All of our JAX/Flax models are designed to run efficiently on Google Cloud TPUs. Here is [a guide for running JAX on Google Cloud TPU](https://cloud.google.com/tpu/docs/jax-quickstart-tpu-vm). Consider applying for the [Google TPU Research Cloud project](https://sites.research.google/trc/) for free TPU compute. Each example README contains more details on the specific model and training procedure. ## Running on single or multiple GPUs All of our JAX/Flax examples also run efficiently on single and multiple GPUs. You can use the same instructions in the README to launch training on GPU. Distributed training is supported out-of-the box and scripts will use all the GPUs that are detected. You should follow this [guide for installing JAX on GPUs](https://github.com/google/jax/#pip-installation-gpu-cuda) since the installation depends on your CUDA and CuDNN version. ## Supported models Porting models from PyTorch to JAX/Flax is an ongoing effort. Feel free to reach out if you are interested in contributing a model in JAX/Flax -- we'll be adding a guide for porting models from PyTorch in the upcoming few weeks. For a complete overview of models that are supported in JAX/Flax, please have a look at [this](https://huggingface.co/transformers/main/index.html#supported-frameworks) table. Over 3000 pretrained checkpoints are supported in JAX/Flax as of May 2021. Click [here](https://huggingface.co/models?filter=jax) to see the full list on the 🤗 hub. ## Upload the trained/fine-tuned model to the Hub All the example scripts support automatic upload of your final model to the [Model Hub](https://huggingface.co/models) by adding a `--push_to_hub` argument. It will then create a repository with your username slash the name of the folder you are using as `output_dir`. For instance, `"sgugger/test-mrpc"` if your username is `sgugger` and you are working in the folder `~/tmp/test-mrpc`. To specify a given repository name, use the `--hub_model_id` argument. You will need to specify the whole repository name (including your username), for instance `--hub_model_id sgugger/finetuned-bert-mrpc`. To upload to an organization you are a member of, just use the name of that organization instead of your username: `--hub_model_id huggingface/finetuned-bert-mrpc`. A few notes on this integration: - you will need to be logged in to the Hugging Face website locally for it to work, the easiest way to achieve this is to run `huggingface-cli login` and then type your username and password when prompted. You can also pass along your authentication token with the `--hub_token` argument. - the `output_dir` you pick will either need to be a new folder or a local clone of the distant repository you are using.
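Once the upload has finished, the checkpoint can be loaded back from the Hub like any other pretrained model. As a minimal sketch (the repository name below is the hypothetical example used above; substitute the one you actually pushed to):

```python
from transformers import AutoTokenizer, FlaxAutoModelForSequenceClassification

# hypothetical repository name: replace it with your own --hub_model_id / output_dir repo
repo_id = "sgugger/finetuned-bert-mrpc"

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = FlaxAutoModelForSequenceClassification.from_pretrained(repo_id)
```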
transformers/examples/flax/README.md/0
{ "file_path": "transformers/examples/flax/README.md", "repo_id": "transformers", "token_count": 1866 }
284
# coding=utf-8 # Copyright 2020 The HuggingFace Team All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Post-processing utilities for question answering. """ import collections import json import logging import os from typing import Optional, Tuple import numpy as np from tqdm.auto import tqdm logger = logging.getLogger(__name__) def postprocess_qa_predictions( examples, features, predictions: Tuple[np.ndarray, np.ndarray], version_2_with_negative: bool = False, n_best_size: int = 20, max_answer_length: int = 30, null_score_diff_threshold: float = 0.0, output_dir: Optional[str] = None, prefix: Optional[str] = None, log_level: Optional[int] = logging.WARNING, ): """ Post-processes the predictions of a question-answering model to convert them to answers that are substrings of the original contexts. This is the base postprocessing functions for models that only return start and end logits. Args: examples: The non-preprocessed dataset (see the main script for more information). features: The processed dataset (see the main script for more information). predictions (:obj:`Tuple[np.ndarray, np.ndarray]`): The predictions of the model: two arrays containing the start logits and the end logits respectively. Its first dimension must match the number of elements of :obj:`features`. version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not the underlying dataset contains examples with no answers. n_best_size (:obj:`int`, `optional`, defaults to 20): The total number of n-best predictions to generate when looking for an answer. max_answer_length (:obj:`int`, `optional`, defaults to 30): The maximum length of an answer that can be generated. This is needed because the start and end predictions are not conditioned on one another. null_score_diff_threshold (:obj:`float`, `optional`, defaults to 0): The threshold used to select the null answer: if the best answer has a score that is less than the score of the null answer minus this threshold, the null answer is selected for this example (note that the score of the null answer for an example giving several features is the minimum of the scores for the null answer on each feature: all features must be aligned on the fact they `want` to predict a null answer). Only useful when :obj:`version_2_with_negative` is :obj:`True`. output_dir (:obj:`str`, `optional`): If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if :obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null answers, are saved in `output_dir`. prefix (:obj:`str`, `optional`): If provided, the dictionaries mentioned above are saved with `prefix` added to their names. 
log_level (:obj:`int`, `optional`, defaults to ``logging.WARNING``): ``logging`` log level (e.g., ``logging.WARNING``) """ if len(predictions) != 2: raise ValueError("`predictions` should be a tuple with two elements (start_logits, end_logits).") all_start_logits, all_end_logits = predictions if len(predictions[0]) != len(features): raise ValueError(f"Got {len(predictions[0])} predictions and {len(features)} features.") # Build a map example to its corresponding features. example_id_to_index = {k: i for i, k in enumerate(examples["id"])} features_per_example = collections.defaultdict(list) for i, feature in enumerate(features): features_per_example[example_id_to_index[feature["example_id"]]].append(i) # The dictionaries we have to fill. all_predictions = collections.OrderedDict() all_nbest_json = collections.OrderedDict() if version_2_with_negative: scores_diff_json = collections.OrderedDict() # Logging. logger.setLevel(log_level) logger.info(f"Post-processing {len(examples)} example predictions split into {len(features)} features.") # Let's loop over all the examples! for example_index, example in enumerate(tqdm(examples)): # Those are the indices of the features associated to the current example. feature_indices = features_per_example[example_index] min_null_prediction = None prelim_predictions = [] # Looping through all the features associated to the current example. for feature_index in feature_indices: # We grab the predictions of the model for this feature. start_logits = all_start_logits[feature_index] end_logits = all_end_logits[feature_index] # This is what will allow us to map some the positions in our logits to span of texts in the original # context. offset_mapping = features[feature_index]["offset_mapping"] # Optional `token_is_max_context`, if provided we will remove answers that do not have the maximum context # available in the current feature. token_is_max_context = features[feature_index].get("token_is_max_context", None) # Update minimum null prediction. feature_null_score = start_logits[0] + end_logits[0] if min_null_prediction is None or min_null_prediction["score"] > feature_null_score: min_null_prediction = { "offsets": (0, 0), "score": feature_null_score, "start_logit": start_logits[0], "end_logit": end_logits[0], } # Go through all possibilities for the `n_best_size` greater start and end logits. start_indexes = np.argsort(start_logits)[-1 : -n_best_size - 1 : -1].tolist() end_indexes = np.argsort(end_logits)[-1 : -n_best_size - 1 : -1].tolist() for start_index in start_indexes: for end_index in end_indexes: # Don't consider out-of-scope answers, either because the indices are out of bounds or correspond # to part of the input_ids that are not in the context. if ( start_index >= len(offset_mapping) or end_index >= len(offset_mapping) or offset_mapping[start_index] is None or len(offset_mapping[start_index]) < 2 or offset_mapping[end_index] is None or len(offset_mapping[end_index]) < 2 ): continue # Don't consider answers with a length that is either < 0 or > max_answer_length. if end_index < start_index or end_index - start_index + 1 > max_answer_length: continue # Don't consider answer that don't have the maximum context available (if such information is # provided). 
if token_is_max_context is not None and not token_is_max_context.get(str(start_index), False): continue prelim_predictions.append( { "offsets": (offset_mapping[start_index][0], offset_mapping[end_index][1]), "score": start_logits[start_index] + end_logits[end_index], "start_logit": start_logits[start_index], "end_logit": end_logits[end_index], } ) if version_2_with_negative and min_null_prediction is not None: # Add the minimum null prediction prelim_predictions.append(min_null_prediction) null_score = min_null_prediction["score"] # Only keep the best `n_best_size` predictions. predictions = sorted(prelim_predictions, key=lambda x: x["score"], reverse=True)[:n_best_size] # Add back the minimum null prediction if it was removed because of its low score. if ( version_2_with_negative and min_null_prediction is not None and not any(p["offsets"] == (0, 0) for p in predictions) ): predictions.append(min_null_prediction) # Use the offsets to gather the answer text in the original context. context = example["context"] for pred in predictions: offsets = pred.pop("offsets") pred["text"] = context[offsets[0] : offsets[1]] # In the very rare edge case we have not a single non-null prediction, we create a fake prediction to avoid # failure. if len(predictions) == 0 or (len(predictions) == 1 and predictions[0]["text"] == ""): predictions.insert(0, {"text": "empty", "start_logit": 0.0, "end_logit": 0.0, "score": 0.0}) # Compute the softmax of all scores (we do it with numpy to stay independent from torch/tf in this file, using # the LogSumExp trick). scores = np.array([pred.pop("score") for pred in predictions]) exp_scores = np.exp(scores - np.max(scores)) probs = exp_scores / exp_scores.sum() # Include the probabilities in our predictions. for prob, pred in zip(probs, predictions): pred["probability"] = prob # Pick the best prediction. If the null answer is not possible, this is easy. if not version_2_with_negative: all_predictions[example["id"]] = predictions[0]["text"] else: # Otherwise we first need to find the best non-empty prediction. i = 0 while predictions[i]["text"] == "": i += 1 best_non_null_pred = predictions[i] # Then we compare to the null prediction using the threshold. score_diff = null_score - best_non_null_pred["start_logit"] - best_non_null_pred["end_logit"] scores_diff_json[example["id"]] = float(score_diff) # To be JSON-serializable. if score_diff > null_score_diff_threshold: all_predictions[example["id"]] = "" else: all_predictions[example["id"]] = best_non_null_pred["text"] # Make `predictions` JSON-serializable by casting np.float back to float. all_nbest_json[example["id"]] = [ {k: (float(v) if isinstance(v, (np.float16, np.float32, np.float64)) else v) for k, v in pred.items()} for pred in predictions ] # If we have an output_dir, let's save all those dicts. 
if output_dir is not None: if not os.path.isdir(output_dir): raise EnvironmentError(f"{output_dir} is not a directory.") prediction_file = os.path.join( output_dir, "predictions.json" if prefix is None else f"{prefix}_predictions.json" ) nbest_file = os.path.join( output_dir, "nbest_predictions.json" if prefix is None else f"{prefix}_nbest_predictions.json" ) if version_2_with_negative: null_odds_file = os.path.join( output_dir, "null_odds.json" if prefix is None else f"{prefix}_null_odds.json" ) logger.info(f"Saving predictions to {prediction_file}.") with open(prediction_file, "w") as writer: writer.write(json.dumps(all_predictions, indent=4) + "\n") logger.info(f"Saving nbest_preds to {nbest_file}.") with open(nbest_file, "w") as writer: writer.write(json.dumps(all_nbest_json, indent=4) + "\n") if version_2_with_negative: logger.info(f"Saving null_odds to {null_odds_file}.") with open(null_odds_file, "w") as writer: writer.write(json.dumps(scores_diff_json, indent=4) + "\n") return all_predictions def postprocess_qa_predictions_with_beam_search( examples, features, predictions: Tuple[np.ndarray, np.ndarray], version_2_with_negative: bool = False, n_best_size: int = 20, max_answer_length: int = 30, start_n_top: int = 5, end_n_top: int = 5, output_dir: Optional[str] = None, prefix: Optional[str] = None, log_level: Optional[int] = logging.WARNING, ): """ Post-processes the predictions of a question-answering model with beam search to convert them to answers that are substrings of the original contexts. This is the postprocessing functions for models that return start and end logits, indices, as well as cls token predictions. Args: examples: The non-preprocessed dataset (see the main script for more information). features: The processed dataset (see the main script for more information). predictions (:obj:`Tuple[np.ndarray, np.ndarray]`): The predictions of the model: two arrays containing the start logits and the end logits respectively. Its first dimension must match the number of elements of :obj:`features`. version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not the underlying dataset contains examples with no answers. n_best_size (:obj:`int`, `optional`, defaults to 20): The total number of n-best predictions to generate when looking for an answer. max_answer_length (:obj:`int`, `optional`, defaults to 30): The maximum length of an answer that can be generated. This is needed because the start and end predictions are not conditioned on one another. start_n_top (:obj:`int`, `optional`, defaults to 5): The number of top start logits too keep when searching for the :obj:`n_best_size` predictions. end_n_top (:obj:`int`, `optional`, defaults to 5): The number of top end logits too keep when searching for the :obj:`n_best_size` predictions. output_dir (:obj:`str`, `optional`): If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if :obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null answers, are saved in `output_dir`. prefix (:obj:`str`, `optional`): If provided, the dictionaries mentioned above are saved with `prefix` added to their names. 
log_level (:obj:`int`, `optional`, defaults to ``logging.WARNING``): ``logging`` log level (e.g., ``logging.WARNING``) """ if len(predictions) != 5: raise ValueError("`predictions` should be a tuple with five elements.") start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits = predictions if len(predictions[0]) != len(features): raise ValueError(f"Got {len(predictions[0])} predictions and {len(features)} features.") # Build a map example to its corresponding features. example_id_to_index = {k: i for i, k in enumerate(examples["id"])} features_per_example = collections.defaultdict(list) for i, feature in enumerate(features): features_per_example[example_id_to_index[feature["example_id"]]].append(i) # The dictionaries we have to fill. all_predictions = collections.OrderedDict() all_nbest_json = collections.OrderedDict() scores_diff_json = collections.OrderedDict() if version_2_with_negative else None # Logging. logger.setLevel(log_level) logger.info(f"Post-processing {len(examples)} example predictions split into {len(features)} features.") # Let's loop over all the examples! for example_index, example in enumerate(tqdm(examples)): # Those are the indices of the features associated to the current example. feature_indices = features_per_example[example_index] min_null_score = None prelim_predictions = [] # Looping through all the features associated to the current example. for feature_index in feature_indices: # We grab the predictions of the model for this feature. start_log_prob = start_top_log_probs[feature_index] start_indexes = start_top_index[feature_index] end_log_prob = end_top_log_probs[feature_index] end_indexes = end_top_index[feature_index] feature_null_score = cls_logits[feature_index] # This is what will allow us to map some the positions in our logits to span of texts in the original # context. offset_mapping = features[feature_index]["offset_mapping"] # Optional `token_is_max_context`, if provided we will remove answers that do not have the maximum context # available in the current feature. token_is_max_context = features[feature_index].get("token_is_max_context", None) # Update minimum null prediction if min_null_score is None or feature_null_score < min_null_score: min_null_score = feature_null_score # Go through all possibilities for the `n_start_top`/`n_end_top` greater start and end logits. for i in range(start_n_top): for j in range(end_n_top): start_index = int(start_indexes[i]) j_index = i * end_n_top + j end_index = int(end_indexes[j_index]) # Don't consider out-of-scope answers (last part of the test should be unnecessary because of the # p_mask but let's not take any risk) if ( start_index >= len(offset_mapping) or end_index >= len(offset_mapping) or offset_mapping[start_index] is None or len(offset_mapping[start_index]) < 2 or offset_mapping[end_index] is None or len(offset_mapping[end_index]) < 2 ): continue # Don't consider answers with a length negative or > max_answer_length. if end_index < start_index or end_index - start_index + 1 > max_answer_length: continue # Don't consider answer that don't have the maximum context available (if such information is # provided). 
if token_is_max_context is not None and not token_is_max_context.get(str(start_index), False): continue prelim_predictions.append( { "offsets": (offset_mapping[start_index][0], offset_mapping[end_index][1]), "score": start_log_prob[i] + end_log_prob[j_index], "start_log_prob": start_log_prob[i], "end_log_prob": end_log_prob[j_index], } ) # Only keep the best `n_best_size` predictions. predictions = sorted(prelim_predictions, key=lambda x: x["score"], reverse=True)[:n_best_size] # Use the offsets to gather the answer text in the original context. context = example["context"] for pred in predictions: offsets = pred.pop("offsets") pred["text"] = context[offsets[0] : offsets[1]] # In the very rare edge case we have not a single non-null prediction, we create a fake prediction to avoid # failure. if len(predictions) == 0: # Without predictions min_null_score is going to be None and None will cause an exception later min_null_score = -2e-6 predictions.insert(0, {"text": "", "start_logit": -1e-6, "end_logit": -1e-6, "score": min_null_score}) # Compute the softmax of all scores (we do it with numpy to stay independent from torch/tf in this file, using # the LogSumExp trick). scores = np.array([pred.pop("score") for pred in predictions]) exp_scores = np.exp(scores - np.max(scores)) probs = exp_scores / exp_scores.sum() # Include the probabilities in our predictions. for prob, pred in zip(probs, predictions): pred["probability"] = prob # Pick the best prediction and set the probability for the null answer. all_predictions[example["id"]] = predictions[0]["text"] if version_2_with_negative: scores_diff_json[example["id"]] = float(min_null_score) # Make `predictions` JSON-serializable by casting np.float back to float. all_nbest_json[example["id"]] = [ {k: (float(v) if isinstance(v, (np.float16, np.float32, np.float64)) else v) for k, v in pred.items()} for pred in predictions ] # If we have an output_dir, let's save all those dicts. if output_dir is not None: if not os.path.isdir(output_dir): raise EnvironmentError(f"{output_dir} is not a directory.") prediction_file = os.path.join( output_dir, "predictions.json" if prefix is None else f"{prefix}_predictions.json" ) nbest_file = os.path.join( output_dir, "nbest_predictions.json" if prefix is None else f"{prefix}_nbest_predictions.json" ) if version_2_with_negative: null_odds_file = os.path.join( output_dir, "null_odds.json" if prefix is None else f"{prefix}_null_odds.json" ) logger.info(f"Saving predictions to {prediction_file}.") with open(prediction_file, "w") as writer: writer.write(json.dumps(all_predictions, indent=4) + "\n") logger.info(f"Saving nbest_preds to {nbest_file}.") with open(nbest_file, "w") as writer: writer.write(json.dumps(all_nbest_json, indent=4) + "\n") if version_2_with_negative: logger.info(f"Saving null_odds to {null_odds_file}.") with open(null_odds_file, "w") as writer: writer.write(json.dumps(scores_diff_json, indent=4) + "\n") return all_predictions, scores_diff_json
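# --- Usage sketch (editor's addition, not part of the original script) -------------
# A minimal, hypothetical example of calling `postprocess_qa_predictions` (defined
# above) on a toy input. The real scripts pass `datasets.Dataset` objects produced by
# the SQuAD preprocessing; the tiny dataset below only mimics the fields the function
# actually reads ("id", "context", "example_id", "offset_mapping").
import numpy as np
from datasets import Dataset

toy_context = "The quick brown fox jumps over the lazy dog."
toy_examples = Dataset.from_dict({"id": ["ex0"], "context": [toy_context]})

# One feature spanning the whole context. Index 0 plays the role of the CLS token:
# its offsets are (0, 0) and its logits score the "no answer" option.
toy_offsets = [(0, 0), (0, 3), (4, 9), (10, 15), (16, 19), (20, 25), (26, 30), (31, 34), (35, 39), (40, 44)]
toy_features = Dataset.from_dict({"example_id": ["ex0"], "offset_mapping": [toy_offsets]})

# Fake model outputs of shape (num_features, seq_len): make "brown" the best start
# token and "fox" the best end token.
start_logits = np.zeros((1, len(toy_offsets)))
end_logits = np.zeros((1, len(toy_offsets)))
start_logits[0, 3] = 5.0  # token "brown" -> characters 10-15 of the context
end_logits[0, 4] = 5.0    # token "fox"   -> characters 16-19 of the context

toy_answers = postprocess_qa_predictions(
    examples=toy_examples,
    features=toy_features,
    predictions=(start_logits, end_logits),
    n_best_size=5,
    max_answer_length=30,
)
print(toy_answers)  # OrderedDict([('ex0', 'brown fox')])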
transformers/examples/flax/question-answering/utils_qa.py/0
{ "file_path": "transformers/examples/flax/question-answering/utils_qa.py", "repo_id": "transformers", "token_count": 9468 }
285
#!/usr/bin/env python # coding=utf-8 # Copyright 2021 The HuggingFace Team All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Pre-training/Fine-tuning ViT for image classification . Here is the full list of checkpoints on the hub that can be fine-tuned by this script: https://huggingface.co/models?filter=vit """ import logging import os import sys import time import warnings from dataclasses import asdict, dataclass, field from enum import Enum from pathlib import Path from typing import Callable, Optional import jax import jax.numpy as jnp import optax # for dataset and preprocessing import torch import torchvision import torchvision.transforms as transforms from flax import jax_utils from flax.jax_utils import pad_shard_unpad, unreplicate from flax.training import train_state from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key from huggingface_hub import HfApi from tqdm import tqdm import transformers from transformers import ( CONFIG_MAPPING, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, AutoConfig, FlaxAutoModelForImageClassification, HfArgumentParser, is_tensorboard_available, set_seed, ) from transformers.utils import send_example_telemetry logger = logging.getLogger(__name__) MODEL_CONFIG_CLASSES = list(FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class TrainingArguments: output_dir: str = field( metadata={"help": "The output directory where the model predictions and checkpoints will be written."}, ) overwrite_output_dir: bool = field( default=False, metadata={ "help": ( "Overwrite the content of the output directory. " "Use this to continue training if output_dir points to a checkpoint directory." 
) }, ) do_train: bool = field(default=False, metadata={"help": "Whether to run training."}) do_eval: bool = field(default=False, metadata={"help": "Whether to run eval on the dev set."}) per_device_train_batch_size: int = field( default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for training."} ) per_device_eval_batch_size: int = field( default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for evaluation."} ) learning_rate: float = field(default=5e-5, metadata={"help": "The initial learning rate for AdamW."}) weight_decay: float = field(default=0.0, metadata={"help": "Weight decay for AdamW if we apply some."}) adam_beta1: float = field(default=0.9, metadata={"help": "Beta1 for AdamW optimizer"}) adam_beta2: float = field(default=0.999, metadata={"help": "Beta2 for AdamW optimizer"}) adam_epsilon: float = field(default=1e-8, metadata={"help": "Epsilon for AdamW optimizer."}) adafactor: bool = field(default=False, metadata={"help": "Whether or not to replace AdamW by Adafactor."}) num_train_epochs: float = field(default=3.0, metadata={"help": "Total number of training epochs to perform."}) warmup_steps: int = field(default=0, metadata={"help": "Linear warmup over warmup_steps."}) logging_steps: int = field(default=500, metadata={"help": "Log every X updates steps."}) save_steps: int = field(default=500, metadata={"help": "Save checkpoint every X updates steps."}) eval_steps: int = field(default=None, metadata={"help": "Run an evaluation every X steps."}) seed: int = field(default=42, metadata={"help": "Random seed that will be set at the beginning of training."}) push_to_hub: bool = field( default=False, metadata={"help": "Whether or not to upload the trained model to the model hub after training."} ) hub_model_id: str = field( default=None, metadata={"help": "The name of the repository to keep in sync with the local `output_dir`."} ) hub_token: str = field(default=None, metadata={"help": "The token to use to push to the Model Hub."}) def __post_init__(self): if self.output_dir is not None: self.output_dir = os.path.expanduser(self.output_dir) def to_dict(self): """ Serializes this instance while replace `Enum` by their values (for JSON serialization support). It obfuscates the token values by removing their value. """ d = asdict(self) for k, v in d.items(): if isinstance(v, Enum): d[k] = v.value if isinstance(v, list) and len(v) > 0 and isinstance(v[0], Enum): d[k] = [x.value for x in v] if k.endswith("_token"): d[k] = f"<{k.upper()}>" return d @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. """ model_name_or_path: Optional[str] = field( default=None, metadata={ "help": ( "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch." ) }, ) model_type: Optional[str] = field( default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) dtype: Optional[str] = field( default="float32", metadata={ "help": ( "Floating-point format in which the model weights should be initialized and trained. Choose one of" " `[float32, float16, bfloat16]`." 
) }, ) token: str = field( default=None, metadata={ "help": ( "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token " "generated when running `huggingface-cli login` (stored in `~/.huggingface`)." ) }, ) use_auth_token: bool = field( default=None, metadata={ "help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token` instead." }, ) trust_remote_code: bool = field( default=False, metadata={ "help": ( "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option " "should only be set to `True` for repositories you trust and in which you have read the code, as it will " "execute code present on the Hub on your local machine." ) }, ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ train_dir: str = field( metadata={"help": "Path to the root training directory which contains one subdirectory per class."} ) validation_dir: str = field( metadata={"help": "Path to the root validation directory which contains one subdirectory per class."}, ) image_size: Optional[int] = field(default=224, metadata={"help": " The size (resolution) of each image."}) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) class TrainState(train_state.TrainState): dropout_rng: jnp.ndarray def replicate(self): return jax_utils.replicate(self).replace(dropout_rng=shard_prng_key(self.dropout_rng)) def write_metric(summary_writer, train_metrics, eval_metrics, train_time, step): summary_writer.scalar("train_time", train_time, step) train_metrics = get_metrics(train_metrics) for key, vals in train_metrics.items(): tag = f"train_{key}" for i, val in enumerate(vals): summary_writer.scalar(tag, val, step - len(vals) + i + 1) for metric_name, value in eval_metrics.items(): summary_writer.scalar(f"eval_{metric_name}", value, step) def create_learning_rate_fn( train_ds_size: int, train_batch_size: int, num_train_epochs: int, num_warmup_steps: int, learning_rate: float ) -> Callable[[int], jnp.ndarray]: """Returns a linear warmup, linear_decay learning rate function.""" steps_per_epoch = train_ds_size // train_batch_size num_train_steps = steps_per_epoch * num_train_epochs warmup_fn = optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps) decay_fn = optax.linear_schedule( init_value=learning_rate, end_value=0, transition_steps=num_train_steps - num_warmup_steps ) schedule_fn = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[num_warmup_steps]) return schedule_fn def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. 
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() if model_args.use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token` instead.", FutureWarning, ) if model_args.token is not None: raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") model_args.token = model_args.use_auth_token # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_image_classification", model_args, data_args, framework="flax") if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR) if jax.process_index() == 0: transformers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() # Set the verbosity to info of the Transformers logger (on main process only): logger.info(f"Training/evaluation parameters {training_args}") # set seed for random transforms and torch dataloaders set_seed(training_args.seed) # Handle the repository creation if training_args.push_to_hub: # Retrieve of infer repo_name repo_name = training_args.hub_model_id if repo_name is None: repo_name = Path(training_args.output_dir).absolute().name # Create repo and retrieve repo_id api = HfApi() repo_id = api.create_repo(repo_name, exist_ok=True, token=training_args.hub_token).repo_id # Initialize datasets and pre-processing transforms # We use torchvision here for faster pre-processing # Note that here we are using some default pre-processing, for maximum accuracy # one should tune this part and carefully select what transformations to use. 
normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) train_dataset = torchvision.datasets.ImageFolder( data_args.train_dir, transforms.Compose( [ transforms.RandomResizedCrop(data_args.image_size), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize, ] ), ) eval_dataset = torchvision.datasets.ImageFolder( data_args.validation_dir, transforms.Compose( [ transforms.Resize(data_args.image_size), transforms.CenterCrop(data_args.image_size), transforms.ToTensor(), normalize, ] ), ) # Load pretrained model and tokenizer if model_args.config_name: config = AutoConfig.from_pretrained( model_args.config_name, num_labels=len(train_dataset.classes), image_size=data_args.image_size, cache_dir=model_args.cache_dir, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) elif model_args.model_name_or_path: config = AutoConfig.from_pretrained( model_args.model_name_or_path, num_labels=len(train_dataset.classes), image_size=data_args.image_size, cache_dir=model_args.cache_dir, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) else: config = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") if model_args.model_name_or_path: model = FlaxAutoModelForImageClassification.from_pretrained( model_args.model_name_or_path, config=config, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype), token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) else: model = FlaxAutoModelForImageClassification.from_config( config, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype), trust_remote_code=model_args.trust_remote_code, ) # Store some constant num_epochs = int(training_args.num_train_epochs) train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count() per_device_eval_batch_size = int(training_args.per_device_eval_batch_size) eval_batch_size = per_device_eval_batch_size * jax.device_count() steps_per_epoch = len(train_dataset) // train_batch_size total_train_steps = steps_per_epoch * num_epochs def collate_fn(examples): pixel_values = torch.stack([example[0] for example in examples]) labels = torch.tensor([example[1] for example in examples]) batch = {"pixel_values": pixel_values, "labels": labels} batch = {k: v.numpy() for k, v in batch.items()} return batch # Create data loaders train_loader = torch.utils.data.DataLoader( train_dataset, batch_size=train_batch_size, shuffle=True, num_workers=data_args.preprocessing_num_workers, persistent_workers=True, drop_last=True, collate_fn=collate_fn, ) eval_loader = torch.utils.data.DataLoader( eval_dataset, batch_size=eval_batch_size, shuffle=False, num_workers=data_args.preprocessing_num_workers, persistent_workers=True, drop_last=False, collate_fn=collate_fn, ) # Enable tensorboard only on the master node has_tensorboard = is_tensorboard_available() if has_tensorboard and jax.process_index() == 0: try: from flax.metrics.tensorboard import SummaryWriter summary_writer = SummaryWriter(log_dir=Path(training_args.output_dir)) except ImportError as ie: has_tensorboard = False logger.warning( f"Unable to display metrics through TensorBoard because some package are not installed: {ie}" ) else: logger.warning( "Unable to display metrics through TensorBoard because the package is not installed: " "Please run pip install tensorboard to enable." 
) # Initialize our training rng = jax.random.PRNGKey(training_args.seed) rng, dropout_rng = jax.random.split(rng) # Create learning rate schedule linear_decay_lr_schedule_fn = create_learning_rate_fn( len(train_dataset), train_batch_size, training_args.num_train_epochs, training_args.warmup_steps, training_args.learning_rate, ) # create adam optimizer adamw = optax.adamw( learning_rate=linear_decay_lr_schedule_fn, b1=training_args.adam_beta1, b2=training_args.adam_beta2, eps=training_args.adam_epsilon, weight_decay=training_args.weight_decay, ) # Setup train state state = TrainState.create(apply_fn=model.__call__, params=model.params, tx=adamw, dropout_rng=dropout_rng) def loss_fn(logits, labels): loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])) return loss.mean() # Define gradient update step fn def train_step(state, batch): dropout_rng, new_dropout_rng = jax.random.split(state.dropout_rng) def compute_loss(params): labels = batch.pop("labels") logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0] loss = loss_fn(logits, labels) return loss grad_fn = jax.value_and_grad(compute_loss) loss, grad = grad_fn(state.params) grad = jax.lax.pmean(grad, "batch") new_state = state.apply_gradients(grads=grad, dropout_rng=new_dropout_rng) metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)} metrics = jax.lax.pmean(metrics, axis_name="batch") return new_state, metrics # Define eval fn def eval_step(params, batch): labels = batch.pop("labels") logits = model(**batch, params=params, train=False)[0] loss = loss_fn(logits, labels) # summarize metrics accuracy = (jnp.argmax(logits, axis=-1) == labels).mean() metrics = {"loss": loss, "accuracy": accuracy} metrics = jax.lax.pmean(metrics, axis_name="batch") return metrics # Create parallel version of the train and eval step p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,)) p_eval_step = jax.pmap(eval_step, "batch") # Replicate the train state on each device state = state.replicate() logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {num_epochs}") logger.info(f" Instantaneous batch size per device = {training_args.per_device_train_batch_size}") logger.info(f" Total train batch size (w. parallel & distributed) = {train_batch_size}") logger.info(f" Total optimization steps = {total_train_steps}") train_time = 0 epochs = tqdm(range(num_epochs), desc=f"Epoch ... (1/{num_epochs})", position=0) for epoch in epochs: # ======================== Training ================================ train_start = time.time() # Create sampling rng rng, input_rng = jax.random.split(rng) train_metrics = [] steps_per_epoch = len(train_dataset) // train_batch_size train_step_progress_bar = tqdm(total=steps_per_epoch, desc="Training...", position=1, leave=False) # train for batch in train_loader: batch = shard(batch) state, train_metric = p_train_step(state, batch) train_metrics.append(train_metric) train_step_progress_bar.update(1) train_time += time.time() - train_start train_metric = unreplicate(train_metric) train_step_progress_bar.close() epochs.write( f"Epoch... 
({epoch + 1}/{num_epochs} | Loss: {train_metric['loss']}, Learning Rate:" f" {train_metric['learning_rate']})" ) # ======================== Evaluating ============================== eval_metrics = [] eval_steps = len(eval_dataset) // eval_batch_size eval_step_progress_bar = tqdm(total=eval_steps, desc="Evaluating...", position=2, leave=False) for batch in eval_loader: # Model forward metrics = pad_shard_unpad(p_eval_step, static_return=True)( state.params, batch, min_device_batch=per_device_eval_batch_size ) eval_metrics.append(metrics) eval_step_progress_bar.update(1) # normalize eval metrics eval_metrics = get_metrics(eval_metrics) eval_metrics = jax.tree_util.tree_map(jnp.mean, eval_metrics) # Print metrics and update progress bar eval_step_progress_bar.close() desc = ( f"Epoch... ({epoch + 1}/{num_epochs} | Eval Loss: {round(eval_metrics['loss'].item(), 4)} | " f"Eval Accuracy: {round(eval_metrics['accuracy'].item(), 4)})" ) epochs.write(desc) epochs.desc = desc # Save metrics if has_tensorboard and jax.process_index() == 0: cur_step = epoch * (len(train_dataset) // train_batch_size) write_metric(summary_writer, train_metrics, eval_metrics, train_time, cur_step) # save checkpoint after each epoch and push checkpoint to the hub if jax.process_index() == 0: params = jax.device_get(jax.tree_util.tree_map(lambda x: x[0], state.params)) model.save_pretrained(training_args.output_dir, params=params) if training_args.push_to_hub: api.upload_folder( commit_message=f"Saving weights and logs of epoch {epoch}", folder_path=training_args.output_dir, repo_id=repo_id, repo_type="model", token=training_args.hub_token, ) if __name__ == "__main__": main()
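# --- Schedule sketch (editor's addition, not part of the original script) ----------
# `create_learning_rate_fn` above joins a linear warmup with a linear decay. The
# self-contained snippet below rebuilds the same optax schedule with assumed,
# illustrative sizes and prints a few values, which can help sanity-check the
# warmup_steps / num_train_epochs settings before launching a run.
import optax

train_ds_size, train_batch_size = 10_000, 64
num_train_epochs, num_warmup_steps, learning_rate = 3, 100, 5e-5

steps_per_epoch = train_ds_size // train_batch_size
num_train_steps = steps_per_epoch * num_train_epochs
warmup_fn = optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps)
decay_fn = optax.linear_schedule(
    init_value=learning_rate, end_value=0.0, transition_steps=num_train_steps - num_warmup_steps
)
schedule_fn = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[num_warmup_steps])

for step in (0, 50, num_warmup_steps, num_train_steps // 2, num_train_steps):
    # Ramps linearly from 0 to 5e-5 over the first 100 steps, then decays back to 0.
    print(step, float(schedule_fn(step)))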
transformers/examples/flax/vision/run_image_classification.py/0
{ "file_path": "transformers/examples/flax/vision/run_image_classification.py", "repo_id": "transformers", "token_count": 9849 }
286
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Finetuning the library models for question-answering on SQuAD (DistilBERT, Bert, XLM, XLNet).""" import argparse import glob import logging import os import random import timeit import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange import transformers from transformers import ( MODEL_FOR_QUESTION_ANSWERING_MAPPING, WEIGHTS_NAME, AdamW, AutoConfig, AutoModelForQuestionAnswering, AutoTokenizer, get_linear_schedule_with_warmup, squad_convert_examples_to_features, ) from transformers.data.metrics.squad_metrics import ( compute_predictions_log_probs, compute_predictions_logits, squad_evaluate, ) from transformers.data.processors.squad import SquadResult, SquadV1Processor, SquadV2Processor from transformers.trainer_utils import is_main_process try: from torch.utils.tensorboard import SummaryWriter except ImportError: from tensorboardX import SummaryWriter logger = logging.getLogger(__name__) MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def to_list(tensor): return tensor.detach().cpu().tolist() def train(args, train_dataset, model, tokenizer): """Train the model""" if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay, }, {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total ) # Check if saved optimizer or scheduler states exist if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile( os.path.join(args.model_name_or_path, 
"scheduler.pt") ): # Load in optimizer and scheduler states optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt"))) scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt"))) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True ) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info( " Total train batch size (w. parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), ) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 1 epochs_trained = 0 steps_trained_in_current_epoch = 0 # Check if continuing training from a checkpoint if os.path.exists(args.model_name_or_path): try: # set global_step to global_step of last saved checkpoint from model path checkpoint_suffix = args.model_name_or_path.split("-")[-1].split("/")[0] global_step = int(checkpoint_suffix) epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps) steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps) logger.info(" Continuing training from checkpoint, will skip to saved global_step") logger.info(" Continuing training from epoch %d", epochs_trained) logger.info(" Continuing training from global step %d", global_step) logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch) except ValueError: logger.info(" Starting fine-tuning.") tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange( epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0] ) # Added here for reproducibility set_seed(args) for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): # Skip past any already trained steps if resuming training if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 continue model.train() batch = tuple(t.to(args.device) for t in batch) inputs = { "input_ids": batch[0], "attention_mask": batch[1], "token_type_ids": batch[2], "start_positions": batch[3], "end_positions": batch[4], } if args.model_type in ["xlm", "roberta", "distilbert", "camembert", "bart", "longformer"]: del inputs["token_type_ids"] if args.model_type in ["xlnet", "xlm"]: inputs.update({"cls_index": batch[5], "p_mask": batch[6]}) if args.version_2_with_negative: inputs.update({"is_impossible": batch[7]}) if hasattr(model, "config") and hasattr(model.config, "lang2id"): inputs.update( {"langs": (torch.ones(batch[0].shape, 
dtype=torch.int64) * args.lang_id).to(args.device)} ) outputs = model(**inputs) # model outputs are always tuple in transformers (see doc) loss = outputs[0] if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 # Log metrics if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Only evaluate when single GPU otherwise metrics may not average well if args.local_rank == -1 and args.evaluate_during_training: results = evaluate(args, model, tokenizer) for key, value in results.items(): tb_writer.add_scalar("eval_{}".format(key), value, global_step) tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step) tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step) logging_loss = tr_loss # Save model checkpoint if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step)) # Take care of distributed/parallel training model_to_save = model.module if hasattr(model, "module") else model model_to_save.save_pretrained(output_dir) tokenizer.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, "training_args.bin")) logger.info("Saving model checkpoint to %s", output_dir) torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) logger.info("Saving optimizer and scheduler states to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate(args, model, tokenizer, prefix=""): dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True) if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(dataset) eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # multi-gpu evaluate if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel): model = torch.nn.DataParallel(model) # Eval! 
logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(dataset)) logger.info(" Batch size = %d", args.eval_batch_size) all_results = [] start_time = timeit.default_timer() for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = { "input_ids": batch[0], "attention_mask": batch[1], "token_type_ids": batch[2], } if args.model_type in ["xlm", "roberta", "distilbert", "camembert", "bart", "longformer"]: del inputs["token_type_ids"] feature_indices = batch[3] # XLNet and XLM use more arguments for their predictions if args.model_type in ["xlnet", "xlm"]: inputs.update({"cls_index": batch[4], "p_mask": batch[5]}) # for lang_id-sensitive xlm models if hasattr(model, "config") and hasattr(model.config, "lang2id"): inputs.update( {"langs": (torch.ones(batch[0].shape, dtype=torch.int64) * args.lang_id).to(args.device)} ) outputs = model(**inputs) for i, feature_index in enumerate(feature_indices): eval_feature = features[feature_index.item()] unique_id = int(eval_feature.unique_id) output = [to_list(output[i]) for output in outputs.to_tuple()] # Some models (XLNet, XLM) use 5 arguments for their predictions, while the other "simpler" # models only use two. if len(output) >= 5: start_logits = output[0] start_top_index = output[1] end_logits = output[2] end_top_index = output[3] cls_logits = output[4] result = SquadResult( unique_id, start_logits, end_logits, start_top_index=start_top_index, end_top_index=end_top_index, cls_logits=cls_logits, ) else: start_logits, end_logits = output result = SquadResult(unique_id, start_logits, end_logits) all_results.append(result) evalTime = timeit.default_timer() - start_time logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(dataset)) # Compute predictions output_prediction_file = os.path.join(args.output_dir, "predictions_{}.json".format(prefix)) output_nbest_file = os.path.join(args.output_dir, "nbest_predictions_{}.json".format(prefix)) if args.version_2_with_negative: output_null_log_odds_file = os.path.join(args.output_dir, "null_odds_{}.json".format(prefix)) else: output_null_log_odds_file = None # XLNet and XLM use a more complex post-processing procedure if args.model_type in ["xlnet", "xlm"]: start_n_top = model.config.start_n_top if hasattr(model, "config") else model.module.config.start_n_top end_n_top = model.config.end_n_top if hasattr(model, "config") else model.module.config.end_n_top predictions = compute_predictions_log_probs( examples, features, all_results, args.n_best_size, args.max_answer_length, output_prediction_file, output_nbest_file, output_null_log_odds_file, start_n_top, end_n_top, args.version_2_with_negative, tokenizer, args.verbose_logging, ) else: predictions = compute_predictions_logits( examples, features, all_results, args.n_best_size, args.max_answer_length, args.do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, args.verbose_logging, args.version_2_with_negative, args.null_score_diff_threshold, tokenizer, ) # Compute the F1 and exact scores. 
results = squad_evaluate(examples, predictions) return results def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False): if args.local_rank not in [-1, 0] and not evaluate: # Make sure only the first process in distributed training process the dataset, and the others will use the cache torch.distributed.barrier() # Load data features from cache or dataset file input_dir = args.data_dir if args.data_dir else "." cached_features_file = os.path.join( input_dir, "cached_{}_{}_{}".format( "dev" if evaluate else "train", list(filter(None, args.model_name_or_path.split("/"))).pop(), str(args.max_seq_length), ), ) # Init features and dataset from cache if it exists if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) features_and_dataset = torch.load(cached_features_file) features, dataset, examples = ( features_and_dataset["features"], features_and_dataset["dataset"], features_and_dataset["examples"], ) else: logger.info("Creating features from dataset file at %s", input_dir) if not args.data_dir and ((evaluate and not args.predict_file) or (not evaluate and not args.train_file)): try: import tensorflow_datasets as tfds except ImportError: raise ImportError("If not data_dir is specified, tensorflow_datasets needs to be installed.") if args.version_2_with_negative: logger.warning("tensorflow_datasets does not handle version 2 of SQuAD.") tfds_examples = tfds.load("squad") examples = SquadV1Processor().get_examples_from_dataset(tfds_examples, evaluate=evaluate) else: processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor() if evaluate: examples = processor.get_dev_examples(args.data_dir, filename=args.predict_file) else: examples = processor.get_train_examples(args.data_dir, filename=args.train_file) features, dataset = squad_convert_examples_to_features( examples=examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=not evaluate, return_dataset="pt", threads=args.threads, ) if args.local_rank in [-1, 0]: logger.info("Saving features into cached file %s", cached_features_file) torch.save({"features": features, "dataset": dataset, "examples": examples}, cached_features_file) if args.local_rank == 0 and not evaluate: # Make sure only the first process in distributed training process the dataset, and the others will use the cache torch.distributed.barrier() if output_examples: return dataset, examples, features return dataset def main(): parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_TYPES), ) parser.add_argument( "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", ) parser.add_argument( "--output_dir", default=None, type=str, required=True, help="The output directory where the model checkpoints and predictions will be written.", ) # Other parameters parser.add_argument( "--data_dir", default=None, type=str, help="The input data dir. Should contain the .json files for the task." + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.", ) parser.add_argument( "--train_file", default=None, type=str, help="The input training file. 
If a data dir is specified, will look for the file there" + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.", ) parser.add_argument( "--predict_file", default=None, type=str, help="The input evaluation file. If a data dir is specified, will look for the file there" + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.", ) parser.add_argument( "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name" ) parser.add_argument( "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from huggingface.co", ) parser.add_argument( "--version_2_with_negative", action="store_true", help="If true, the SQuAD examples contain some that do not have an answer.", ) parser.add_argument( "--null_score_diff_threshold", type=float, default=0.0, help="If null_score - best_non_null is greater than the threshold predict null.", ) parser.add_argument( "--max_seq_length", default=384, type=int, help=( "The maximum total input sequence length after WordPiece tokenization. Sequences " "longer than this will be truncated, and sequences shorter than this will be padded." ), ) parser.add_argument( "--doc_stride", default=128, type=int, help="When splitting up a long document into chunks, how much stride to take between chunks.", ) parser.add_argument( "--max_query_length", default=64, type=int, help=( "The maximum number of tokens for the question. Questions longer than this will " "be truncated to this length." ), ) parser.add_argument("--do_train", action="store_true", help="Whether to run training.") parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.") parser.add_argument( "--evaluate_during_training", action="store_true", help="Run evaluation during training at each logging step." ) parser.add_argument( "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model." ) parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument( "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation." ) parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument( "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform." ) parser.add_argument( "--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. 
Override num_train_epochs.", ) parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument( "--n_best_size", default=20, type=int, help="The total number of n-best predictions to generate in the nbest_predictions.json output file.", ) parser.add_argument( "--max_answer_length", default=30, type=int, help=( "The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another." ), ) parser.add_argument( "--verbose_logging", action="store_true", help=( "If true, all of the warnings related to data processing will be printed. " "A number of warnings are expected for a normal SQuAD evaluation." ), ) parser.add_argument( "--lang_id", default=0, type=int, help=( "language id of input for language-specific xlm models (see" " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)" ), ) parser.add_argument("--logging_steps", type=int, default=500, help="Log every X updates steps.") parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.") parser.add_argument( "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", ) parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available") parser.add_argument( "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory" ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument( "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", ) parser.add_argument( "--fp16_opt_level", type=str, default="O1", help=( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. " "See details at https://nvidia.github.io/apex/amp.html" ), ) parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.") parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.") parser.add_argument("--threads", type=int, default=1, help="multiple threads for converting example to features") args = parser.parse_args() if args.doc_stride >= args.max_seq_length - args.max_query_length: logger.warning( "WARNING - You've set a doc stride which may be superior to the document length in some " "examples. This could result in errors when building features from the examples. Please reduce the doc " "stride or increase the maximum length to ensure the features are correctly built." ) if ( os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir ): raise ValueError( "Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format( args.output_dir ) ) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count() else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend="nccl") args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16, ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(args.local_rank): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set seed set_seed(args) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: # Make sure only the first process in distributed training will download model & vocab torch.distributed.barrier() args.model_type = args.model_type.lower() config = AutoConfig.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, cache_dir=args.cache_dir if args.cache_dir else None, ) tokenizer = AutoTokenizer.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None, use_fast=False, # SquadDataset is not compatible with Fast tokenizers which have a smarter overflow handeling ) model = AutoModelForQuestionAnswering.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None, ) if args.local_rank == 0: # Make sure only the first process in distributed training will download model & vocab torch.distributed.barrier() model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set. # Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running `--fp16_opt_level="O2"` will # remove the need for this code, but it is still valid. 
if args.fp16: try: import apex apex.amp.register_half_function(torch, "einsum") except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") # Training if args.do_train: train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False) global_step, tr_loss = train(args, train_dataset, model, tokenizer) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Save the trained model and the tokenizer if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. # They can then be reloaded using `from_pretrained()` # Take care of distributed/parallel training model_to_save = model.module if hasattr(model, "module") else model model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, "training_args.bin")) # Load a trained model and vocabulary that you have fine-tuned model = AutoModelForQuestionAnswering.from_pretrained(args.output_dir) # , force_download=True) # SquadDataset is not compatible with Fast tokenizers which have a smarter overflow handeling # So we use use_fast=False here for now until Fast-tokenizer-compatible-examples are out tokenizer = AutoTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case, use_fast=False) model.to(args.device) # Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory results = {} if args.do_eval and args.local_rank in [-1, 0]: if args.do_train: logger.info("Loading checkpoints saved during training for evaluation") checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = [ os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)) ] else: logger.info("Loading checkpoint %s for evaluation", args.model_name_or_path) checkpoints = [args.model_name_or_path] logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: # Reload the model global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else "" model = AutoModelForQuestionAnswering.from_pretrained(checkpoint) # , force_download=True) model.to(args.device) # Evaluate result = evaluate(args, model, tokenizer, prefix=global_step) result = {k + ("_{}".format(global_step) if global_step else ""): v for k, v in result.items()} results.update(result) logger.info("Results: {}".format(results)) return results if __name__ == "__main__": main()
transformers/examples/legacy/question-answering/run_squad.py/0
{ "file_path": "transformers/examples/legacy/question-answering/run_squad.py", "repo_id": "transformers", "token_count": 14759 }
287
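The legacy `run_squad.py` entry above ends by saving the fine-tuned model and tokenizer to `--output_dir`. As a hedged sketch of what comes next, the snippet below loads such a checkpoint with the `question-answering` pipeline; the `./squad-finetuned` path and the question/context strings are hypothetical placeholders, not part of the original script.

```python
from transformers import pipeline

# Hypothetical directory written by run_squad.py via --output_dir
qa = pipeline("question-answering", model="./squad-finetuned", tokenizer="./squad-finetuned")

result = qa(
    question="What dataset was the model fine-tuned on?",
    context="The model was fine-tuned on SQuAD using the legacy run_squad.py example script.",
)

# The pipeline returns the best answer span, its confidence score, and character offsets.
print(result["answer"], result["score"], result["start"], result["end"])
```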
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import defaultdict from pathlib import Path import pandas as pd from rouge_cli import calculate_rouge_path from utils import calculate_rouge PRED = [ 'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the' ' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe' " depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.", "The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal" " accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's" " founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the" " body.", "Amnesty International releases its annual report on the death penalty. The report catalogs the use of" " state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the" " world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital" " punishment.", ] TGT = [ 'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .' ' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz' " had informed his Lufthansa training school of an episode of severe depression, airline says .", "Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ." " Israel and the United States opposed the move, which could open the door to war crimes investigations against" " Israelis .", "Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to" " death . Organization claims that governments around the world are using the threat of terrorism to advance" " executions . 
The number of executions worldwide has gone down by almost 22% compared with 2013, but death" " sentences up by 28% .", ] def test_disaggregated_scores_are_determinstic(): no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"]) assert isinstance(no_aggregation, defaultdict) no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"]) assert ( pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean() == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean() ) def test_newline_cnn_improvement(): k = "rougeLsum" score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k] score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k] assert score > score_no_sep def test_newline_irrelevant_for_other_metrics(): k = ["rouge1", "rouge2", "rougeL"] score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k) score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k) assert score_sep == score_no_sep def test_single_sent_scores_dont_depend_on_newline_sep(): pred = [ "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.", 'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .', ] tgt = [ "Margot Frank, died in 1945, a month earlier than previously thought.", 'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of' " the final seconds on board Flight 9525.", ] assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False) def test_pegasus_newline(): pred = [ """" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" """ ] tgt = [ """ Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .""" ] prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"] new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"] assert new_score > prev_score def test_rouge_cli(): data_dir = Path("examples/seq2seq/test_data/wmt_en_ro") metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target")) assert isinstance(metrics, dict) metrics_default_dict = calculate_rouge_path( data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False ) assert isinstance(metrics_default_dict, defaultdict)
transformers/examples/legacy/seq2seq/old_test_calculate_rouge.py/0
{ "file_path": "transformers/examples/legacy/seq2seq/old_test_calculate_rouge.py", "repo_id": "transformers", "token_count": 1793 }
288
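The test file above exercises `calculate_rouge` from the legacy seq2seq `utils` module. A minimal usage sketch, assuming it is run from that same `examples/legacy/seq2seq` directory so `utils` is importable; the candidate and reference summaries are invented for illustration.

```python
from utils import calculate_rouge  # local module next to old_test_calculate_rouge.py

preds = ["The cat sat on the mat.", "A strong storm hit the coast on Monday."]
refs = ["The cat was sitting on the mat.", "A powerful storm struck the coast Monday."]

# Default: bootstrap-aggregated scores, one value per requested ROUGE key.
aggregated = calculate_rouge(preds, refs, rouge_keys=["rouge2", "rougeL"])
print(aggregated)

# bootstrap_aggregation=False returns a defaultdict with one score object per example.
per_example = calculate_rouge(preds, refs, bootstrap_aggregation=False, rouge_keys=["rouge2"])
print(per_example["rouge2"])
```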
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_xla_available logger = logging.get_logger(__name__) arg_to_scheduler = { "linear": get_linear_schedule_with_warmup, "cosine": get_cosine_schedule_with_warmup, "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup, "polynomial": get_polynomial_decay_schedule_with_warmup, "constant": get_constant_schedule, "constant_w_warmup": get_constant_schedule_with_warmup, } class Seq2SeqTrainer(Trainer): def __init__(self, config=None, data_args=None, *args, **kwargs): super().__init__(*args, **kwargs) if config is None: assert isinstance(self.model, PreTrainedModel), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" f" {self.model.__class__}" ) self.config = self.model.config else: self.config = config self.data_args = data_args self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for" " padding.." ) if self.args.label_smoothing == 0: self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss self.loss_fn = label_smoothed_nll_loss def create_optimizer_and_scheduler(self, num_training_steps: int): """ Setup the optimizer and the learning rate scheduler. We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the Trainer's init through :obj:`optimizers`, or subclass and override this method in a subclass. 
""" if self.optimizer is None: no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": self.args.weight_decay, }, { "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] optimizer_cls = Adafactor if self.args.adafactor else AdamW if self.args.adafactor: optimizer_cls = Adafactor optimizer_kwargs = {"scale_parameter": False, "relative_step": False} else: optimizer_cls = AdamW optimizer_kwargs = { "betas": (self.args.adam_beta1, self.args.adam_beta2), "eps": self.args.adam_epsilon, } optimizer_kwargs["lr"] = self.args.learning_rate self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs) if self.lr_scheduler is None: self.lr_scheduler = self._get_lr_scheduler(num_training_steps) else: # ignoring --lr_scheduler logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.") def _get_lr_scheduler(self, num_training_steps): schedule_func = arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": scheduler = schedule_func(self.optimizer) elif self.args.lr_scheduler == "constant_w_warmup": scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps) else: scheduler = schedule_func( self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps ) return scheduler def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]: if isinstance(self.train_dataset, torch.utils.data.IterableDataset): return None elif is_torch_xla_available(): return get_tpu_sampler(self.train_dataset) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size, distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED), ) return ( RandomSampler(self.train_dataset) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset) ) def _compute_loss(self, model, inputs, labels): if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token logits = model(**inputs, use_cache=False)[0] loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1)) else: # compute usual loss via models loss, logits = model(**inputs, labels=labels, use_cache=False)[:2] else: # compute label smoothed loss logits = model(**inputs, use_cache=False)[0] lprobs = torch.nn.functional.log_softmax(logits, dim=-1) loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id) return loss, logits def compute_loss(self, model, inputs): labels = inputs.pop("labels") loss, _ = self._compute_loss(model, inputs, labels) return loss def prediction_step( self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[List[str]] = None, ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: """ Perform an evaluation step on :obj:`model` using obj:`inputs`. Subclass and override to inject custom behavior. Args: model (:obj:`nn.Module`): The model to evaluate. inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`): The inputs and targets of the model. The dictionary will be unpacked before being fed to the model. Most models expect the targets under the argument :obj:`labels`. Check your model's documentation for all accepted arguments. 
prediction_loss_only (:obj:`bool`): Whether or not to return the loss only. Return: Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and labels (each being optional). """ inputs = self._prepare_inputs(inputs) gen_kwargs = { "max_length": self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: generated_tokens = self.model.generate( inputs["input_ids"], attention_mask=inputs["attention_mask"], **gen_kwargs, ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"]) labels = inputs.pop("labels") with torch.no_grad(): # compute loss on predict data loss, logits = self._compute_loss(model, inputs, labels) loss = loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) logits = generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"]) return (loss, logits, labels) def _pad_tensors_to_max_len(self, tensor, max_length): # If PAD token is not defined at least EOS token has to be defined pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be" f" padded to `max_length`={max_length}" ) padded_tensor = pad_token_id * torch.ones( (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device ) padded_tensor[:, : tensor.shape[-1]] = tensor return padded_tensor
transformers/examples/legacy/seq2seq/seq2seq_trainer.py/0
{ "file_path": "transformers/examples/legacy/seq2seq/seq2seq_trainer.py", "repo_id": "transformers", "token_count": 4874 }
289
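In `prediction_step` above, generated sequences and labels are padded out to `max_length` with `_pad_tensors_to_max_len` so tensors from different batches line up. Below is a small self-contained sketch of that padding idea in plain PyTorch; the pad id and token values are arbitrary example numbers, not taken from any real config.

```python
import torch

def pad_to_max_len(tensor: torch.Tensor, max_length: int, pad_token_id: int) -> torch.Tensor:
    # Build a (batch, max_length) tensor full of the pad id, then copy the real tokens in front.
    padded = pad_token_id * torch.ones(
        (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
    )
    padded[:, : tensor.shape[-1]] = tensor
    return padded

generated = torch.tensor([[5, 8, 13], [2, 7, 11]])  # e.g. sequences that stopped before max_length
print(pad_to_max_len(generated, max_length=6, pad_token_id=0))
# tensor([[ 5,  8, 13,  0,  0,  0],
#         [ 2,  7, 11,  0,  0,  0]])
```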
<!---
Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# VisionTextDualEncoder and CLIP model training examples

The following example showcases how to train a CLIP-like vision-text dual encoder model using a pre-trained vision and text encoder. Such a model can be used for natural language image search and potentially zero-shot image classification.

The model is inspired by [CLIP](https://openai.com/blog/clip/), introduced by Alec Radford et al. The idea is to train a vision encoder and a text encoder jointly to project the representation of images and their captions into the same embedding space, such that the caption embeddings are located near the embeddings of the images they describe.

### Download COCO dataset (2017)

This example uses the COCO dataset (2017) through a custom dataset script, which requires users to manually download the COCO dataset before training.

```bash
mkdir data
cd data
wget http://images.cocodataset.org/zips/train2017.zip
wget http://images.cocodataset.org/zips/val2017.zip
wget http://images.cocodataset.org/zips/test2017.zip
wget http://images.cocodataset.org/annotations/annotations_trainval2017.zip
wget http://images.cocodataset.org/annotations/image_info_test2017.zip
cd ..
```

Having downloaded the COCO dataset manually, you should be able to load it with the `ydshieh/coco_dataset_script` dataset loading script:

```py
import os
import datasets

COCO_DIR = os.path.join(os.getcwd(), "data")
ds = datasets.load_dataset("ydshieh/coco_dataset_script", "2017", data_dir=COCO_DIR)
```

### Create a model from a vision encoder model and a text encoder model

Next, we create a [VisionTextDualEncoderModel](https://huggingface.co/docs/transformers/model_doc/vision-text-dual-encoder#visiontextdualencoder). The `VisionTextDualEncoderModel` class lets you load any vision and text encoder model to create a dual encoder.

Here is an example of how to load the model using pre-trained vision and text models.

```python3
from transformers import (
    VisionTextDualEncoderModel,
    VisionTextDualEncoderProcessor,
    AutoTokenizer,
    AutoImageProcessor
)

model = VisionTextDualEncoderModel.from_vision_text_pretrained(
    "openai/clip-vit-base-patch32", "FacebookAI/roberta-base"
)

tokenizer = AutoTokenizer.from_pretrained("FacebookAI/roberta-base")
image_processor = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
processor = VisionTextDualEncoderProcessor(image_processor, tokenizer)

# save the model and processor
model.save_pretrained("clip-roberta")
processor.save_pretrained("clip-roberta")
```

This loads both the text and vision encoders using pre-trained weights; the projection layers are randomly initialized, except for CLIP's vision model. If you use CLIP to initialize the vision model, then the vision projection weights are also loaded using the pre-trained weights.
### Train the model Finally, we can run the example script to train the model: ```bash python examples/pytorch/contrastive-image-text/run_clip.py \ --output_dir ./clip-roberta-finetuned \ --model_name_or_path ./clip-roberta \ --data_dir $PWD/data \ --dataset_name ydshieh/coco_dataset_script \ --dataset_config_name=2017 \ --image_column image_path \ --caption_column caption \ --remove_unused_columns=False \ --do_train --do_eval \ --per_device_train_batch_size="64" \ --per_device_eval_batch_size="64" \ --learning_rate="5e-5" --warmup_steps="0" --weight_decay 0.1 \ --overwrite_output_dir \ --push_to_hub ```
transformers/examples/pytorch/contrastive-image-text/README.md/0
{ "file_path": "transformers/examples/pytorch/contrastive-image-text/README.md", "repo_id": "transformers", "token_count": 1268 }
290
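Once training has produced a dual encoder, it can be used to score captions against an image. The sketch below assumes the local `clip-roberta` directory created in the README above (or the fine-tuned `clip-roberta-finetuned` output); the blank placeholder image and the captions are only there to keep the snippet self-contained.

```python
import torch
from PIL import Image
from transformers import VisionTextDualEncoderModel, VisionTextDualEncoderProcessor

model = VisionTextDualEncoderModel.from_pretrained("clip-roberta")        # local path from the README
processor = VisionTextDualEncoderProcessor.from_pretrained("clip-roberta")

image = Image.new("RGB", (224, 224), color="white")                        # placeholder image
captions = ["a photo of a cat", "a photo of a dog"]

inputs = processor(text=captions, images=image, return_tensors="pt", padding=True)
with torch.no_grad():
    outputs = model(**inputs)

# logits_per_image has shape (num_images, num_captions); softmax turns it into caption probabilities.
print(outputs.logits_per_image.softmax(dim=1))
```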
#!/usr/bin/env python # coding=utf-8 # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for sequence to sequence speech recognition. """ # You can also adapt this script on your own sequence to sequence speech # recognition task. Pointers for this are left as comments. import logging import os import sys import warnings from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union import datasets import evaluate import torch from datasets import DatasetDict, load_dataset import transformers from transformers import ( AutoConfig, AutoFeatureExtractor, AutoModelForSpeechSeq2Seq, AutoProcessor, AutoTokenizer, HfArgumentParser, Seq2SeqTrainer, Seq2SeqTrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.40.0.dev0") require_version("datasets>=1.18.0", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt") logger = logging.getLogger(__name__) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) feature_extractor_name: Optional[str] = field( default=None, metadata={"help": "feature extractor name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) token: str = field( default=None, metadata={ "help": ( "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token " "generated when running `huggingface-cli login` (stored in `~/.huggingface`)." ) }, ) use_auth_token: bool = field( default=None, metadata={ "help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token` instead." }, ) trust_remote_code: bool = field( default=False, metadata={ "help": ( "Whether or not to allow for custom models defined on the Hub in their own modeling files. 
This option " "should only be set to `True` for repositories you trust and in which you have read the code, as it will " "execute code present on the Hub on your local machine." ) }, ) freeze_feature_encoder: bool = field( default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."} ) freeze_encoder: bool = field( default=False, metadata={"help": "Whether to freeze the entire encoder of the seq2seq model."} ) forced_decoder_ids: List[List[int]] = field( default=None, metadata={ "help": ( "A list of pairs of integers which indicates a mapping from generation indices to token indices " "that will be forced before sampling. For example, [[0, 123]] means the first generated token " "will always be a token of index 123." ) }, ) suppress_tokens: List[int] = field( default=None, metadata={"help": "A list of tokens that will be suppressed at generation."} ) apply_spec_augment: bool = field( default=False, metadata={ "help": "Whether to apply *SpecAugment* data augmentation to the input features. This is currently only relevant for Wav2Vec2, HuBERT, WavLM and Whisper models." }, ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ dataset_name: str = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) audio_column_name: str = field( default="audio", metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"}, ) text_column_name: str = field( default="text", metadata={"help": "The name of the dataset column containing the text data. Defaults to 'text'"}, ) max_duration_in_seconds: float = field( default=20.0, metadata={ "help": ( "Truncate audio files that are longer than `max_duration_in_seconds` seconds to" " 'max_duration_in_seconds`" ) }, ) min_duration_in_seconds: float = field( default=0.0, metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"} ) preprocessing_only: bool = field( default=False, metadata={ "help": ( "Whether to only do data preprocessing and skip training. This is especially useful when data" " preprocessing errors out in distributed training due to timeout. In this case, one should run the" " preprocessing in a non-distributed setup with `preprocessing_only=True` so that the cached datasets" " can consequently be loaded in distributed training" ) }, ) train_split_name: str = field( default="train", metadata={ "help": "The name of the training data set split to use (via the datasets library). 
Defaults to 'train'" }, ) eval_split_name: str = field( default="test", metadata={ "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'" }, ) do_lower_case: bool = field( default=True, metadata={"help": "Whether the target text should be lower cased."}, ) language: str = field( default=None, metadata={ "help": ( "Language for multilingual fine-tuning. This argument should be set for multilingual fine-tuning " "only. For English speech recognition, it should be set to `None`." ) }, ) task: str = field( default="transcribe", metadata={"help": "Task, either `transcribe` for speech recognition or `translate` for speech translation."}, ) @dataclass class DataCollatorSpeechSeq2SeqWithPadding: """ Data collator that will dynamically pad the inputs received. Args: processor ([`WhisperProcessor`]) The processor used for processing the data. decoder_start_token_id (`int`) The begin-of-sentence of the decoder. forward_attention_mask (`bool`) Whether to return attention_mask. """ processor: Any decoder_start_token_id: int forward_attention_mask: bool def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: # split inputs and labels since they have to be of different lengths and need # different padding methods model_input_name = self.processor.model_input_names[0] input_features = [{model_input_name: feature[model_input_name]} for feature in features] label_features = [{"input_ids": feature["labels"]} for feature in features] batch = self.processor.feature_extractor.pad(input_features, return_tensors="pt") if self.forward_attention_mask: batch["attention_mask"] = torch.LongTensor([feature["attention_mask"] for feature in features]) labels_batch = self.processor.tokenizer.pad(label_features, return_tensors="pt") # replace padding with -100 to ignore loss correctly labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100) # if bos token is appended in previous tokenization step, # cut bos token here as it's append later anyways if (labels[:, 0] == self.decoder_start_token_id).all().cpu().item(): labels = labels[:, 1:] batch["labels"] = labels return batch def main(): # 1. Parse input arguments # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() if model_args.use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token` instead.", FutureWarning, ) if model_args.token is not None: raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") model_args.token = model_args.use_auth_token # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_speech_recognition_seq2seq", model_args, data_args) # 2. 
Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() logger.info("Training/evaluation parameters %s", training_args) # 3. Detecting last checkpoint and eventually continue from last checkpoint last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed) # 4. Load dataset raw_datasets = DatasetDict() if training_args.do_train: raw_datasets["train"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=data_args.train_split_name, cache_dir=model_args.cache_dir, token=model_args.token, ) if training_args.do_eval: raw_datasets["eval"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=data_args.eval_split_name, cache_dir=model_args.cache_dir, token=model_args.token, ) if data_args.audio_column_name not in next(iter(raw_datasets.values())).column_names: raise ValueError( f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'. " "Make sure to set `--audio_column_name` to the correct audio column - one of " f"{', '.join(next(iter(raw_datasets.values())).column_names)}." ) if data_args.text_column_name not in next(iter(raw_datasets.values())).column_names: raise ValueError( f"--text_column_name {data_args.text_column_name} not found in dataset '{data_args.dataset_name}'. " "Make sure to set `--text_column_name` to the correct text column - one of " f"{', '.join(next(iter(raw_datasets.values())).column_names)}." ) # 5. 
Load pretrained model, tokenizer, and feature extractor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) config.update({"forced_decoder_ids": model_args.forced_decoder_ids, "suppress_tokens": model_args.suppress_tokens}) # SpecAugment for whisper models if getattr(config, "model_type", None) == "whisper": config.update({"apply_spec_augment": model_args.apply_spec_augment}) feature_extractor = AutoFeatureExtractor.from_pretrained( model_args.feature_extractor_name if model_args.feature_extractor_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) model = AutoModelForSpeechSeq2Seq.from_pretrained( model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) if model.config.decoder_start_token_id is None: raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined") if model_args.freeze_feature_encoder: model.freeze_feature_encoder() if model_args.freeze_encoder: model.freeze_encoder() model.model.encoder.gradient_checkpointing = False if data_args.language is not None: # We only need to set the task id when the language is specified (i.e. in a multilingual setting) tokenizer.set_prefix_tokens(language=data_args.language, task=data_args.task) # 6. Resample speech dataset if necessary dataset_sampling_rate = next(iter(raw_datasets.values())).features[data_args.audio_column_name].sampling_rate if dataset_sampling_rate != feature_extractor.sampling_rate: raw_datasets = raw_datasets.cast_column( data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate) ) # 7. Preprocessing the datasets. # We need to read the audio files as arrays and tokenize the targets. 
max_input_length = data_args.max_duration_in_seconds * feature_extractor.sampling_rate min_input_length = data_args.min_duration_in_seconds * feature_extractor.sampling_rate audio_column_name = data_args.audio_column_name num_workers = data_args.preprocessing_num_workers text_column_name = data_args.text_column_name model_input_name = feature_extractor.model_input_names[0] do_lower_case = data_args.do_lower_case # if SpecAugment is used for whisper models, return attention_mask to guide the mask along time axis forward_attention_mask = ( getattr(config, "model_type", None) == "whisper" and getattr(config, "apply_spec_augment", False) and getattr(config, "mask_time_prob", 0) > 0 ) if data_args.max_train_samples is not None: raw_datasets["train"] = raw_datasets["train"].select(range(data_args.max_train_samples)) if data_args.max_eval_samples is not None: raw_datasets["eval"] = raw_datasets["eval"].select(range(data_args.max_eval_samples)) def prepare_dataset(batch): # process audio sample = batch[audio_column_name] inputs = feature_extractor( sample["array"], sampling_rate=sample["sampling_rate"], return_attention_mask=forward_attention_mask ) # process audio length batch[model_input_name] = inputs.get(model_input_name)[0] batch["input_length"] = len(sample["array"]) if forward_attention_mask: batch["attention_mask"] = inputs.get("attention_mask")[0] # process targets input_str = batch[text_column_name].lower() if do_lower_case else batch[text_column_name] batch["labels"] = tokenizer(input_str).input_ids return batch with training_args.main_process_first(desc="dataset map pre-processing"): vectorized_datasets = raw_datasets.map( prepare_dataset, remove_columns=next(iter(raw_datasets.values())).column_names, num_proc=data_args.preprocessing_num_workers, desc="preprocess train dataset", ) # filter data that is shorter than min_input_length or longer than # max_input_length def is_audio_in_length_range(length): return length > min_input_length and length < max_input_length vectorized_datasets = vectorized_datasets.filter( is_audio_in_length_range, num_proc=num_workers, input_columns=["input_length"], ) # for large datasets it is advised to run the preprocessing on a # single machine first with `args.preprocessing_only` since there will mostly likely # be a timeout when running the script in distributed mode. # In a second step `args.preprocessing_only` can then be set to `False` to load the # cached dataset if data_args.preprocessing_only: cache = {k: v.cache_files for k, v in vectorized_datasets.items()} logger.info(f"Data preprocessing finished. Files cached at {cache}.") return # 8. Load Metric metric = evaluate.load("wer", cache_dir=model_args.cache_dir) def compute_metrics(pred): pred_ids = pred.predictions pred.label_ids[pred.label_ids == -100] = tokenizer.pad_token_id pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True) # we do not want to group tokens when computing the metrics label_str = tokenizer.batch_decode(pred.label_ids, skip_special_tokens=True) wer = metric.compute(predictions=pred_str, references=label_str) return {"wer": wer} # 9. 
Create a single speech processor # make sure all processes wait until data is saved with training_args.main_process_first(): # only the main process saves them if is_main_process(training_args.local_rank): # save feature extractor, tokenizer and config feature_extractor.save_pretrained(training_args.output_dir) tokenizer.save_pretrained(training_args.output_dir) config.save_pretrained(training_args.output_dir) processor = AutoProcessor.from_pretrained(training_args.output_dir) # 10. Define data collator data_collator = DataCollatorSpeechSeq2SeqWithPadding( processor=processor, decoder_start_token_id=model.config.decoder_start_token_id, forward_attention_mask=forward_attention_mask, ) # 11. Initialize Trainer trainer = Seq2SeqTrainer( model=model, args=training_args, train_dataset=vectorized_datasets["train"] if training_args.do_train else None, eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None, tokenizer=feature_extractor, data_collator=data_collator, compute_metrics=compute_metrics if training_args.predict_with_generate else None, ) # 12. Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the feature extractor too for easy upload metrics = train_result.metrics max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(vectorized_datasets["train"]) ) metrics["train_samples"] = min(max_train_samples, len(vectorized_datasets["train"])) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # 13. Evaluation results = {} if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate( metric_key_prefix="eval", max_length=training_args.generation_max_length, num_beams=training_args.generation_num_beams, ) max_eval_samples = ( data_args.max_eval_samples if data_args.max_eval_samples is not None else len(vectorized_datasets["eval"]) ) metrics["eval_samples"] = min(max_eval_samples, len(vectorized_datasets["eval"])) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) # 14. Write Training Stats kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "automatic-speech-recognition"} if data_args.dataset_name is not None: kwargs["dataset_tags"] = data_args.dataset_name if data_args.dataset_config_name is not None: kwargs["dataset_args"] = data_args.dataset_config_name kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" else: kwargs["dataset"] = data_args.dataset_name if training_args.push_to_hub: trainer.push_to_hub(**kwargs) else: trainer.create_model_card(**kwargs) return results if __name__ == "__main__": main()
transformers/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py/0
{ "file_path": "transformers/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py", "repo_id": "transformers", "token_count": 10176 }
291
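The speech recognition script above saves the model, tokenizer, feature extractor, and config to `--output_dir`, so the resulting checkpoint can be loaded by the `automatic-speech-recognition` pipeline. A minimal sketch, where the `./whisper-small-finetuned` path is hypothetical and one second of synthetic silence stands in for a real recording.

```python
import numpy as np
from transformers import pipeline

# Hypothetical checkpoint directory produced by run_speech_recognition_seq2seq.py
asr = pipeline("automatic-speech-recognition", model="./whisper-small-finetuned")

# One second of 16 kHz audio (silence here) shows the expected raw-array input format.
audio = np.zeros(16_000, dtype=np.float32)
result = asr({"raw": audio, "sampling_rate": 16_000})

print(result["text"])
```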
#!/usr/bin/env python # coding=utf-8 # Copyright 2022 University of Cambridge, Tencent AI Lab, DeepMind and The University of Hong Kong Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The examples of running contrastive search on the auto-APIs; Running this example: python run_generation_contrastive_search.py --model_name_or_path=openai-community/gpt2-large --penalty_alpha=0.6 --k=4 --length=256 """ import argparse import logging from accelerate import PartialState from accelerate.utils import set_seed from transformers import AutoModelForCausalLM, AutoTokenizer logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger = logging.getLogger(__name__) def main(): parser = argparse.ArgumentParser() parser.add_argument( "--model_name_or_path", default=None, type=str, required=True, ) parser.add_argument("--prompt", type=str, default="") parser.add_argument("--length", type=int, default=20) parser.add_argument("--stop_token", type=str, default=None, help="Token at which text generation is stopped") parser.add_argument( "--temperature", type=float, default=1.0, help="temperature of 1.0 has no effect, lower tend toward greedy sampling", ) parser.add_argument( "--repetition_penalty", type=float, default=1.0, help="primarily useful for CTRL model; in that case, use 1.2" ) parser.add_argument("--k", type=int, default=0) parser.add_argument("--penalty_alpha", type=float, default=0.0) parser.add_argument("--p", type=float, default=0.9) parser.add_argument("--prefix", type=str, default="", help="Text added prior to input.") parser.add_argument("--padding_text", type=str, default="", help="Deprecated, the use of `--prefix` is preferred.") parser.add_argument("--xlm_language", type=str, default="", help="Optional language when used with the XLM model.") parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") parser.add_argument( "--use_cpu", action="store_true", help="Whether or not to use cpu. If set to False, " "we will use gpu/npu or mps device if available", ) parser.add_argument( "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", ) args = parser.parse_args() # Initialize the distributed state. 
distributed_state = PartialState(cpu=args.use_cpu) logger.warning(f"device: {distributed_state.device}, 16-bits inference: {args.fp16}") if args.seed is not None: set_seed(args.seed) # Initialize the model and tokenizer tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path) model = AutoModelForCausalLM.from_pretrained(args.model_name_or_path) # tokenizer = GPT2Tokenizer.from_pretrained(args.model_name_or_path) # model = OPTForCausalLM.from_pretrained(args.model_name_or_path) # Set the model to the right device model.to(distributed_state.device) if args.fp16: model.half() logger.info(args) prompt_text = args.prompt if args.prompt else input("Model prompt >>> ") inputs = tokenizer(prompt_text, return_tensors="pt", add_special_tokens=False) inputs = {key: value.to(distributed_state.device) for key, value in inputs.items()} output_sequences = model.generate( **inputs, max_length=args.length + len(inputs["input_ids"][0]), penalty_alpha=args.penalty_alpha, top_k=args.k, ) generated_sequences = [] for generated_sequence_idx, generated_sequence in enumerate(output_sequences): print(f"=== GENERATED SEQUENCE {generated_sequence_idx + 1} ===") generated_sequence = generated_sequence.tolist() # Decode text text = tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True, add_special_tokens=False) # Remove all text after the stop token text = text[: text.find(args.stop_token) if args.stop_token else None] # Add the prompt at the beginning of the sequence. Remove the excess text that was used for pre-processing total_sequence = ( prompt_text + text[len(tokenizer.decode(inputs["input_ids"][0], clean_up_tokenization_spaces=True)) :] ) generated_sequences.append(total_sequence) print(total_sequence) return generated_sequences if __name__ == "__main__": main()
transformers/examples/pytorch/text-generation/run_generation_contrastive_search.py/0
{ "file_path": "transformers/examples/pytorch/text-generation/run_generation_contrastive_search.py", "repo_id": "transformers", "token_count": 1870 }
292
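Contrastive search is exposed directly through `generate`, so the decoding used by the script above can be reproduced without the argument parsing. A minimal sketch with a small GPT-2 checkpoint; the prompt and generation length are arbitrary choices.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")

inputs = tokenizer("DeepMind Company is", return_tensors="pt")

# A positive penalty_alpha combined with top_k switches generate() into contrastive search.
output_ids = model.generate(**inputs, penalty_alpha=0.6, top_k=4, max_new_tokens=64)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```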
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) logger = logging.getLogger(__name__) @dataclass(frozen=True) class InputExample: """ A single training/test example for simple sequence classification. Args: guid: Unique id for the example. text_a: string. The untokenized text of the first sequence. For single sequence tasks, only this sequence must be specified. text_b: (Optional) string. The untokenized text of the second sequence. Only must be specified for sequence pair tasks. label: (Optional) string. The label of the example. This should be specified for train and dev examples, but not for test examples. pairID: (Optional) string. Unique identifier for the pair of sentences. """ guid: str text_a: str text_b: Optional[str] = None label: Optional[str] = None pairID: Optional[str] = None @dataclass(frozen=True) class InputFeatures: """ A single set of features of data. Property names are the same names as the corresponding inputs to a model. Args: input_ids: Indices of input sequence tokens in the vocabulary. attention_mask: Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: Usually ``1`` for tokens that are NOT MASKED, ``0`` for MASKED (padded) tokens. token_type_ids: (Optional) Segment token indices to indicate first and second portions of the inputs. Only some models use them. label: (Optional) Label corresponding to the input. Int for classification problems, float for regression problems. pairID: (Optional) Unique identifier for the pair of sentences. """ input_ids: List[int] attention_mask: Optional[List[int]] = None token_type_ids: Optional[List[int]] = None label: Optional[Union[int, float]] = None pairID: Optional[int] = None if is_torch_available(): import torch from torch.utils.data import Dataset class HansDataset(Dataset): """ This will be superseded by a framework-agnostic approach soon. 
""" features: List[InputFeatures] def __init__( self, data_dir: str, tokenizer: PreTrainedTokenizer, task: str, max_seq_length: Optional[int] = None, overwrite_cache=False, evaluate: bool = False, ): processor = hans_processors[task]() cached_features_file = os.path.join( data_dir, "cached_{}_{}_{}_{}".format( "dev" if evaluate else "train", tokenizer.__class__.__name__, str(max_seq_length), task, ), ) label_list = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) label_list[1], label_list[2] = label_list[2], label_list[1] self.label_list = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. lock_path = cached_features_file + ".lock" with FileLock(lock_path): if os.path.exists(cached_features_file) and not overwrite_cache: logger.info(f"Loading features from cached file {cached_features_file}") self.features = torch.load(cached_features_file) else: logger.info(f"Creating features from dataset file at {data_dir}") examples = ( processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir) ) logger.info("Training examples: %s", len(examples)) self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer) logger.info("Saving features into cached file %s", cached_features_file) torch.save(self.features, cached_features_file) def __len__(self): return len(self.features) def __getitem__(self, i) -> InputFeatures: return self.features[i] def get_labels(self): return self.label_list if is_tf_available(): import tensorflow as tf class TFHansDataset: """ This will be superseded by a framework-agnostic approach soon. 
""" features: List[InputFeatures] def __init__( self, data_dir: str, tokenizer: PreTrainedTokenizer, task: str, max_seq_length: Optional[int] = 128, overwrite_cache=False, evaluate: bool = False, ): processor = hans_processors[task]() label_list = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) label_list[1], label_list[2] = label_list[2], label_list[1] self.label_list = label_list examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir) self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"): if ex_index % 10000 == 0: logger.info("Writing example %d of %d" % (ex_index, len(examples))) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) self.dataset = tf.data.Dataset.from_generator( gen, ( { "example_id": tf.int32, "input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32, }, tf.int64, ), ( { "example_id": tf.TensorShape([]), "input_ids": tf.TensorShape([None, None]), "attention_mask": tf.TensorShape([None, None]), "token_type_ids": tf.TensorShape([None, None]), }, tf.TensorShape([]), ), ) def get_dataset(self): return self.dataset def __len__(self): return len(self.features) def __getitem__(self, i) -> InputFeatures: return self.features[i] def get_labels(self): return self.label_list class HansProcessor(DataProcessor): """Processor for the HANS data set.""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev") def get_labels(self): """See base class. Note that we follow the standard three labels for MNLI (see :class:`~transformers.data.processors.utils.MnliProcessor`) but the HANS evaluation groups `contradiction` and `neutral` into `non-entailment` (label 0) while `entailment` is label 1.""" return ["contradiction", "entailment", "neutral"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for i, line in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, line[0]) text_a = line[5] text_b = line[6] pairID = line[7][2:] if line[7].startswith("ex") else line[7] label = line[0] examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID)) return examples def hans_convert_examples_to_features( examples: List[InputExample], label_list: List[str], max_length: int, tokenizer: PreTrainedTokenizer, ): """ Loads a data file into a list of ``InputFeatures`` Args: examples: List of ``InputExamples`` containing the examples. label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method. max_length: Maximum example length. tokenizer: Instance of a tokenizer that will tokenize the examples. Returns: A list of task-specific ``InputFeatures`` which can be fed to the model. 
""" label_map = {label: i for i, label in enumerate(label_list)} features = [] for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"): if ex_index % 10000 == 0: logger.info("Writing example %d" % (ex_index)) inputs = tokenizer( example.text_a, example.text_b, add_special_tokens=True, max_length=max_length, padding="max_length", truncation=True, return_overflowing_tokens=True, ) label = label_map[example.label] if example.label in label_map else 0 pairID = int(example.pairID) features.append(InputFeatures(**inputs, label=label, pairID=pairID)) for i, example in enumerate(examples[:5]): logger.info("*** Example ***") logger.info(f"guid: {example}") logger.info(f"features: {features[i]}") return features hans_tasks_num_labels = { "hans": 3, } hans_processors = { "hans": HansProcessor, }
transformers/examples/research_projects/adversarial/utils_hans.py/0
{ "file_path": "transformers/examples/research_projects/adversarial/utils_hans.py", "repo_id": "transformers", "token_count": 5431 }
293
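A minimal usage sketch for the HANS utilities above (not part of the file): it builds the evaluation split and inspects one encoded example. The module name `utils_hans`, the checkpoint, and the data directory are assumptions for illustration.

```python
# Hypothetical usage of HansDataset from the file above; the import path,
# checkpoint name, and data_dir are placeholders.
from transformers import AutoTokenizer

from utils_hans import HansDataset  # assumes the file above is on the PYTHONPATH

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
eval_dataset = HansDataset(
    data_dir="path/to/hans",   # folder holding heuristics_evaluation_set.txt
    tokenizer=tokenizer,
    task="hans",
    max_seq_length=128,
    evaluate=True,             # use the dev split and its cached-features file
)

print(len(eval_dataset), eval_dataset.get_labels())
feature = eval_dataset[0]      # an InputFeatures instance
print(feature.input_ids[:10], feature.label, feature.pairID)
```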
import os from collections import deque import torch from torch.utils.data import Dataset # ------------ # Data loading # ------------ class CNNDMDataset(Dataset): """Abstracts the dataset used to train seq2seq models. The class will process the documents that are located in the specified folder. The preprocessing will work on any document that is reasonably formatted. On the CNN/DailyMail dataset it will extract both the story and the summary. CNN/Daily News: The CNN/Daily News raw datasets are downloaded from [1]. The stories are stored in different files; the summary appears at the end of the story as sentences that are prefixed by the special `@highlight` line. To process the data, untar both datasets in the same folder, and pass the path to this folder as the "data_dir argument. The formatting code was inspired by [2]. [1] https://cs.nyu.edu/~kcho/ [2] https://github.com/abisee/cnn-dailymail/ """ def __init__(self, path="", prefix="train"): """We initialize the class by listing all the documents to summarize. Files are not read in memory due to the size of some datasets (like CNN/DailyMail). """ assert os.path.isdir(path) self.documents = [] story_filenames_list = os.listdir(path) for story_filename in story_filenames_list: if "summary" in story_filename: continue path_to_story = os.path.join(path, story_filename) if not os.path.isfile(path_to_story): continue self.documents.append(path_to_story) def __len__(self): """Returns the number of documents.""" return len(self.documents) def __getitem__(self, idx): document_path = self.documents[idx] document_name = document_path.split("/")[-1] with open(document_path, encoding="utf-8") as source: raw_story = source.read() story_lines, summary_lines = process_story(raw_story) return document_name, story_lines, summary_lines def process_story(raw_story): """Extract the story and summary from a story file. Arguments: raw_story (str): content of the story file as an utf-8 encoded string. Raises: IndexError: If the story is empty or contains no highlights. """ nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")])) # for some unknown reason some lines miss a period, add it nonempty_lines = [_add_missing_period(line) for line in nonempty_lines] # gather article lines story_lines = [] lines = deque(nonempty_lines) while True: try: element = lines.popleft() if element.startswith("@highlight"): break story_lines.append(element) except IndexError: # if "@highlight" is absent from the file we pop # all elements until there is None, raising an exception. return story_lines, [] # gather summary lines summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines)) return story_lines, summary_lines def _add_missing_period(line): END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u2019", ")"] if line.startswith("@highlight"): return line if line[-1] in END_TOKENS: return line return line + "." # -------------------------- # Encoding and preprocessing # -------------------------- def truncate_or_pad(sequence, block_size, pad_token_id): """Adapt the source and target sequences' lengths to the block size. If the sequence is shorter we append padding token to the right of the sequence. """ if len(sequence) > block_size: return sequence[:block_size] else: sequence.extend([pad_token_id] * (block_size - len(sequence))) return sequence def build_mask(sequence, pad_token_id): """Builds the mask. 
The attention mechanism will only attend to positions with value 1.""" mask = torch.ones_like(sequence) idx_pad_tokens = sequence == pad_token_id mask[idx_pad_tokens] = 0 return mask def encode_for_summarization(story_lines, summary_lines, tokenizer): """Encode the story and summary lines, and join them as specified in [1] by using `[SEP] [CLS]` tokens to separate sentences. """ story_lines_token_ids = [tokenizer.encode(line) for line in story_lines] story_token_ids = [token for sentence in story_lines_token_ids for token in sentence] summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines] summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence] return story_token_ids, summary_token_ids def compute_token_type_ids(batch, separator_token_id): """Segment embeddings as described in [1] The values {0,1} were found in the repository [2]. Attributes: batch: torch.Tensor, size [batch_size, block_size] Batch of input. separator_token_id: int The value of the token that separates the segments. [1] Liu, Yang, and Mirella Lapata. "Text summarization with pretrained encoders." arXiv preprint arXiv:1908.08345 (2019). [2] https://github.com/nlpyang/PreSumm (/src/prepro/data_builder.py, commit fac1217) """ batch_embeddings = [] for sequence in batch: sentence_num = -1 embeddings = [] for s in sequence: if s == separator_token_id: sentence_num += 1 embeddings.append(sentence_num % 2) batch_embeddings.append(embeddings) return torch.tensor(batch_embeddings)
transformers/examples/research_projects/bertabs/utils_summarization.py/0
{ "file_path": "transformers/examples/research_projects/bertabs/utils_summarization.py", "repo_id": "transformers", "token_count": 2180 }
294
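A short sketch of how the helpers above fit together for one CNN/DailyMail story: encode, truncate or pad to a block, then build the attention mask and segment ids. The data path is a placeholder and the file is assumed to be importable as `utils_summarization`.

```python
# Hypothetical end-to-end use of the summarization helpers above on a single story.
import torch
from transformers import BertTokenizer

from utils_summarization import (
    CNNDMDataset,
    build_mask,
    compute_token_type_ids,
    encode_for_summarization,
    truncate_or_pad,
)

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
dataset = CNNDMDataset("path/to/cnn_dm_stories")  # untarred CNN/DailyMail story files

name, story_lines, summary_lines = dataset[0]
story_ids, summary_ids = encode_for_summarization(story_lines, summary_lines, tokenizer)

block_size = 512
story_ids = truncate_or_pad(story_ids, block_size, tokenizer.pad_token_id)
batch = torch.tensor([story_ids])                    # batch of one encoded story
mask = build_mask(batch[0], tokenizer.pad_token_id)  # 1 for tokens, 0 for padding
segments = compute_token_type_ids(batch, tokenizer.sep_token_id)
print(name, batch.shape, int(mask.sum()), segments.shape)
```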
import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def tokenize(example): output = {} output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"] output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"]) return output parser = HfArgumentParser(PretokenizationArguments) args = parser.parse_args() if args.num_workers is None: args.num_workers = multiprocessing.cpu_count() tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir) t_start = time.time() ds = load_dataset(args.dataset_name, split="train") print(f"Dataset loaded in {time.time()-t_start:.2f}s") t_start = time.time() ds = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ "repo_name", "path", "copies", "size", "content", "license", "hash", "line_mean", "line_max", "alpha_frac", "autogenerated", ], ) print(f"Dataset tokenized in {time.time()-t_start:.2f}s") t_start = time.time() ds.push_to_hub(args.tokenized_data_repo) print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
transformers/examples/research_projects/codeparrot/scripts/pretokenizing.py/0
{ "file_path": "transformers/examples/research_projects/codeparrot/scripts/pretokenizing.py", "repo_id": "transformers", "token_count": 527 }
295
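The same `tokenize` mapping can be checked on a tiny in-memory dataset before running it on the full corpus; a sketch (the `gpt2` checkpoint and the toy snippet are arbitrary choices):

```python
# Toy version of the pretokenization script above: map the same function over
# an in-memory dataset and look at the characters-per-token ratio.
from datasets import Dataset
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")

def tokenize(example):
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    return {
        "input_ids": input_ids,
        "ratio_char_token": len(example["content"]) / len(input_ids),
    }

toy = Dataset.from_dict({"content": ["def add(a, b):\n    return a + b\n"]})
toy = toy.map(tokenize)
print(toy[0]["ratio_char_token"])  # characters per token for this snippet
```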
# Distil* Author: @VictorSanh This folder contains the original code used to train Distil* as well as examples showcasing how to use DistilBERT, DistilRoBERTa and DistilGPT2. **January 20, 2020 - Bug fixing** We have recently discovered and fixed [a bug](https://github.com/huggingface/transformers/commit/48cbf267c988b56c71a2380f748a3e6092ccaed3) in the evaluation of our `run_*.py` scripts that caused the reported metrics to be over-estimated on average. We have updated all the metrics with the latest runs. **December 6, 2019 - Update** We release **DistilmBERT**: 92% of `bert-base-multilingual-cased` on XNLI. The model supports 104 different languages listed [here](https://github.com/google-research/bert/blob/master/multilingual.md#list-of-languages). **November 19, 2019 - Update** We release German **DistilBERT**: 98.8% of `bert-base-german-dbmdz-cased` on NER tasks. **October 23, 2019 - Update** We release **DistilRoBERTa**: 95% of `RoBERTa-base`'s performance on GLUE, twice as fast as RoBERTa while being 35% smaller. **October 3, 2019 - Update** We release our [NeurIPS workshop paper](https://arxiv.org/abs/1910.01108) explaining our approach on **DistilBERT**. It includes updated results and further experiments. We applied the same method to GPT2 and release the weights of **DistilGPT2**. DistilGPT2 is two times faster and 33% smaller than GPT2. **The paper supersedes our [previous blogpost](https://medium.com/huggingface/distilbert-8cf3380435b5) with a different distillation loss and better performances. Please use the paper as a reference when comparing/reporting results on DistilBERT.** **September 19, 2019 - Update:** We fixed bugs in the code and released an updated version of the weights trained with a modification of the distillation loss. DistilBERT now reaches 99% of `BERT-base`'s performance on GLUE, and 86.9 F1 score on SQuAD v1.1 dev set (compared to 88.5 for `BERT-base`). We will publish a formal write-up of our approach in the near future! ## What is Distil* Distil* is a class of compressed models that started with DistilBERT. DistilBERT stands for Distilled-BERT. DistilBERT is a small, fast, cheap and light Transformer model based on Bert architecture. It has 40% less parameters than `bert-base-uncased`, runs 60% faster while preserving 97% of BERT's performances as measured on the GLUE language understanding benchmark. DistilBERT is trained using knowledge distillation, a technique to compress a large model called the teacher into a smaller model called the student. By distillating Bert, we obtain a smaller Transformer model that bears a lot of similarities with the original BERT model while being lighter, smaller and faster to run. DistilBERT is thus an interesting option to put large-scaled trained Transformer model into production. We have applied the same method to other Transformer architectures and released the weights: - GPT2: on the [WikiText-103](https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/) benchmark, GPT2 reaches a perplexity on the test set of 16.3 compared to 21.1 for **DistilGPT2** (after fine-tuning on the train set). - RoBERTa: **DistilRoBERTa** reaches 95% of `RoBERTa-base`'s performance on GLUE while being twice faster and 35% smaller. - German BERT: **German DistilBERT** reaches 99% of `bert-base-german-dbmdz-cased`'s performance on German NER (CoNLL-2003). - Multilingual BERT: **DistilmBERT** reaches 92% of Multilingual BERT's performance on XNLI while being twice faster and 25% smaller. 
The model supports 104 languages listed [here](https://github.com/google-research/bert/blob/master/multilingual.md#list-of-languages). For more information on DistilBERT, please refer to our [NeurIPS workshop paper](https://arxiv.org/abs/1910.01108). Here are the results on the dev sets of GLUE: | Model | Macro-score | CoLA | MNLI | MRPC | QNLI | QQP | RTE | SST-2| STS-B| WNLI | | :---: | :---: | :---:| :---:| :---:| :---:| :---:| :---:| :---:| :---:| :---: | | BERT-base-uncased | **79.5** | 56.3 | 84.7 | 88.6 | 91.8 | 89.6 | 69.3 | 92.7 | 89.0 | 53.5 | | DistilBERT-base-uncased | **77.0** | 51.3 | 82.1 | 87.5 | 89.2 | 88.5 | 59.9 | 91.3 | 86.9 | 56.3 | | BERT-base-cased | **78.2** | 58.2 | 83.9 | 87.8 | 91.0 | 89.2 | 66.1 | 91.7 | 89.2 | 46.5 | | DistilBERT-base-cased | **75.9** | 47.2 | 81.5 | 85.6 | 88.2 | 87.8 | 60.6 | 90.4 | 85.5 | 56.3 | | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | | RoBERTa-base (reported) | **83.2**/**86.4**<sup>2</sup> | 63.6 | 87.6 | 90.2 | 92.8 | 91.9 | 78.7 | 94.8 | 91.2 | 57.7<sup>3</sup> | | DistilRoBERTa<sup>1</sup> | **79.0**/**82.3**<sup>2</sup> | 59.3 | 84.0 | 86.6 | 90.8 | 89.4 | 67.9 | 92.5 | 88.3 | 52.1 | <sup>1</sup> We did not use the MNLI checkpoint for fine-tuning but directly perform transfer learning on the pre-trained DistilRoBERTa. <sup>2</sup> Macro-score computed without WNLI. <sup>3</sup> We compute this score ourselves for completeness. Here are the results on the *test* sets for 6 of the languages available in XNLI. The results are computed in the zero shot setting (trained on the English portion and evaluated on the target language portion): | Model | English | Spanish | Chinese | German | Arabic | Urdu | | :---: | :---: | :---: | :---: | :---: | :---: | :---:| | mBERT base cased (computed) | 82.1 | 74.6 | 69.1 | 72.3 | 66.4 | 58.5 | | mBERT base uncased (reported)| 81.4 | 74.3 | 63.8 | 70.5 | 62.1 | 58.3 | | DistilmBERT | 78.2 | 69.1 | 64.0 | 66.3 | 59.1 | 54.7 | ## Setup This part of the library has only be tested with Python3.6+. There are few specific dependencies to install before launching a distillation, you can install them with the command `pip install -r requirements.txt`. **Important note:** The training scripts have been updated to support PyTorch v1.2.0 (there are breaking changes compared to v1.1.0). ## How to use DistilBERT Transformers includes five pre-trained Distil* models, currently only provided for English and German (we are investigating the possibility to train and release a multilingual version of DistilBERT): - `distilbert-base-uncased`: DistilBERT English language model pretrained on the same data used to pretrain Bert (concatenation of the Toronto Book Corpus and full English Wikipedia) using distillation with the supervision of the `bert-base-uncased` version of Bert. The model has 6 layers, 768 dimension and 12 heads, totalizing 66M parameters. - `distilbert-base-uncased-distilled-squad`: A finetuned version of `distilbert-base-uncased` finetuned using (a second step of) knowledge distillation on SQuAD 1.0. This model reaches a F1 score of 86.9 on the dev set (for comparison, Bert `bert-base-uncased` version reaches a 88.5 F1 score). - `distilbert-base-cased`: DistilBERT English language model pretrained on the same data used to pretrain Bert (concatenation of the Toronto Book Corpus and full English Wikipedia) using distillation with the supervision of the `bert-base-cased` version of Bert. The model has 6 layers, 768 dimension and 12 heads, totalizing 65M parameters. 
- `distilbert-base-cased-distilled-squad`: A finetuned version of `distilbert-base-cased` finetuned using (a second step of) knowledge distillation on SQuAD 1.0. This model reaches a F1 score of 87.1 on the dev set (for comparison, Bert `bert-base-cased` version reaches a 88.7 F1 score). - `distilbert-base-german-cased`: DistilBERT German language model pretrained on 1/2 of the data used to pretrain Bert using distillation with the supervision of the `bert-base-german-dbmdz-cased` version of German DBMDZ Bert. For NER tasks the model reaches a F1 score of 83.49 on the CoNLL-2003 test set (for comparison, `bert-base-german-dbmdz-cased` reaches a 84.52 F1 score), and a F1 score of 85.23 on the GermEval 2014 test set (`bert-base-german-dbmdz-cased` reaches a 86.89 F1 score). - `distilgpt2`: DistilGPT2 English language model pretrained with the supervision of `gpt2` (the smallest version of GPT2) on [OpenWebTextCorpus](https://skylion007.github.io/OpenWebTextCorpus/), a reproduction of OpenAI's WebText dataset. The model has 6 layers, 768 dimension and 12 heads, totalizing 82M parameters (compared to 124M parameters for GPT2). On average, DistilGPT2 is two times faster than GPT2. - `distilroberta-base`: DistilRoBERTa English language model pretrained with the supervision of `roberta-base` solely on [OpenWebTextCorpus](https://skylion007.github.io/OpenWebTextCorpus/), a reproduction of OpenAI's WebText dataset (it is ~4 times less training data than the teacher RoBERTa). The model has 6 layers, 768 dimension and 12 heads, totalizing 82M parameters (compared to 125M parameters for RoBERTa-base). On average DistilRoBERTa is twice as fast as Roberta-base. - `distilbert-base-multilingual-cased`: DistilmBERT multilingual model pretrained with the supervision of `bert-base-multilingual-cased` on the concatenation of Wikipedia in 104 different languages. The model supports the 104 languages listed [here](https://github.com/google-research/bert/blob/master/multilingual.md#list-of-languages). The model has 6 layers, 768 dimension and 12 heads, totalizing 134M parameters (compared to 177M parameters for mBERT-base). On average DistilmBERT is twice as fast as mBERT-base. Using DistilBERT is very similar to using BERT. DistilBERT share the same tokenizer as BERT's `bert-base-uncased` even though we provide a link to this tokenizer under the `DistilBertTokenizer` name to have a consistent naming between the library models. ```python tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-cased') model = DistilBertModel.from_pretrained('distilbert-base-cased') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) outputs = model(input_ids) last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple ``` Similarly, using the other Distil* models simply consists in calling the base classes with a different pretrained checkpoint: - DistilBERT uncased: `model = DistilBertModel.from_pretrained('distilbert-base-uncased')` - DistilGPT2: `model = GPT2Model.from_pretrained('distilgpt2')` - DistilRoBERTa: `model = RobertaModel.from_pretrained('distilroberta-base')` - DistilmBERT: `model = DistilBertModel.from_pretrained('distilbert-base-multilingual-cased')` ## How to train Distil* In the following, we will explain how you can train DistilBERT. ### A. Preparing the data The weights we release are trained using a concatenation of Toronto Book Corpus and English Wikipedia (same training data as the English version of BERT). 
To avoid processing the data several time, we do it once and for all before the training. From now on, will suppose that you have a text file `dump.txt` which contains one sequence per line (a sequence being composed of one of several coherent sentences). First, we will binarize the data, i.e. tokenize the data and convert each token in an index in our model's vocabulary. ```bash python scripts/binarized_data.py \ --file_path data/dump.txt \ --tokenizer_type bert \ --tokenizer_name bert-base-uncased \ --dump_file data/binarized_text ``` Our implementation of masked language modeling loss follows [XLM](https://github.com/facebookresearch/XLM)'s one and smooths the probability of masking with a factor that put more emphasis on rare words. Thus we count the occurrences of each tokens in the data: ```bash python scripts/token_counts.py \ --data_file data/binarized_text.bert-base-uncased.pickle \ --token_counts_dump data/token_counts.bert-base-uncased.pickle \ --vocab_size 30522 ``` ### B. Training Training with distillation is really simple once you have pre-processed the data: ```bash python train.py \ --student_type distilbert \ --student_config training_configs/distilbert-base-uncased.json \ --teacher_type bert \ --teacher_name bert-base-uncased \ --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_cos 1.0 --alpha_clm 0.0 --mlm \ --freeze_pos_embs \ --dump_path serialization_dir/my_first_training \ --data_file data/binarized_text.bert-base-uncased.pickle \ --token_counts data/token_counts.bert-base-uncased.pickle \ --force # overwrites the `dump_path` if it already exists. ``` By default, this will launch a training on a single GPU (even if more are available on the cluster). Other parameters are available in the command line, please look in `train.py` or run `python train.py --help` to list them. We highly encourage you to use distributed training for training DistilBERT as the training corpus is quite large. Here's an example that runs a distributed training on a single node having 4 GPUs: ```bash export NODE_RANK=0 export N_NODES=1 export N_GPU_NODE=4 export WORLD_SIZE=4 export MASTER_PORT=<AN_OPEN_PORT> export MASTER_ADDR=<I.P.> pkill -f 'python -u train.py' python -m torch.distributed.launch \ --nproc_per_node=$N_GPU_NODE \ --nnodes=$N_NODES \ --node_rank $NODE_RANK \ --master_addr $MASTER_ADDR \ --master_port $MASTER_PORT \ train.py \ --force \ --n_gpu $WORLD_SIZE \ --student_type distilbert \ --student_config training_configs/distilbert-base-uncased.json \ --teacher_type bert \ --teacher_name bert-base-uncased \ --alpha_ce 0.33 --alpha_mlm 0.33 --alpha_cos 0.33 --alpha_clm 0.0 --mlm \ --freeze_pos_embs \ --dump_path serialization_dir/my_first_training \ --data_file data/binarized_text.bert-base-uncased.pickle \ --token_counts data/token_counts.bert-base-uncased.pickle ``` **Tips:** Starting distilled training with good initialization of the model weights is crucial to reach decent performance. In our experiments, we initialized our model from a few layers of the teacher (Bert) itself! Please refer to `scripts/extract.py` and `scripts/extract_distilbert.py` to create a valid initialization checkpoint and use `--student_pretrained_weights` argument to use this initialization for the distilled training! Happy distillation! 
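As a side note on the token-counts step in section A: those counts feed a smoothed masking distribution that puts more weight on rare words. The exact code lives in `train.py`; the sketch below only illustrates the idea, and the 0.7 exponent is an assumption rather than the documented value.

```python
# Illustrative only: turn the token counts dumped above into per-token masking
# probabilities that favour rare tokens (assumes the dump is a pickled sequence
# of counts indexed by token id; the smoothing exponent is a guess).
import pickle

import numpy as np

with open("data/token_counts.bert-base-uncased.pickle", "rb") as fp:
    counts = pickle.load(fp)

smoothing = 0.7
token_probs = np.maximum(counts, 1) ** -smoothing  # rare tokens -> larger weight
token_probs = token_probs / token_probs.sum()      # normalize to a distribution
print(token_probs.argmax())                        # id of the most-often-masked token
```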
## Citation If you find the resource useful, you should cite the following paper: ```bibtex @inproceedings{sanh2019distilbert, title={DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter}, author={Sanh, Victor and Debut, Lysandre and Chaumond, Julien and Wolf, Thomas}, booktitle={NeurIPS EMC^2 Workshop}, year={2019} } ```
transformers/examples/research_projects/distillation/README.md/0
{ "file_path": "transformers/examples/research_projects/distillation/README.md", "repo_id": "transformers", "token_count": 5075 }
296
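The Python usage snippet in the Distil* README above is shown without its imports; a self-contained version of the same example (using the `distilbert-base-cased` checkpoint named in the README):

```python
import torch

from transformers import DistilBertModel, DistilBertTokenizer

tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-cased")
model = DistilBertModel.from_pretrained("distilbert-base-cased")

input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)
outputs = model(input_ids)
last_hidden_states = outputs[0]  # last hidden state, shape (1, sequence_length, 768)
print(last_hidden_states.shape)
```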
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team and Facebook, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utils to train DistilBERT adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM) """ import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger = logging.getLogger(__name__) def git_log(folder_path: str): """ Log commit info. """ repo = git.Repo(search_parent_directories=True) repo_infos = { "repo_id": str(repo), "repo_sha": str(repo.head.object.hexsha), "repo_branch": str(repo.active_branch), } with open(os.path.join(folder_path, "git_log.json"), "w") as f: json.dump(repo_infos, f, indent=4) def init_gpu_params(params): """ Handle single and multi-GPU / multi-node. """ if params.n_gpu <= 0: params.local_rank = 0 params.master_port = -1 params.is_master = True params.multi_gpu = False return assert torch.cuda.is_available() logger.info("Initializing GPUs") if params.n_gpu > 1: assert params.local_rank != -1 params.world_size = int(os.environ["WORLD_SIZE"]) params.n_gpu_per_node = int(os.environ["N_GPU_NODE"]) params.global_rank = int(os.environ["RANK"]) # number of nodes / node ID params.n_nodes = params.world_size // params.n_gpu_per_node params.node_id = params.global_rank // params.n_gpu_per_node params.multi_gpu = True assert params.n_nodes == int(os.environ["N_NODES"]) assert params.node_id == int(os.environ["NODE_RANK"]) # local job (single GPU) else: assert params.local_rank == -1 params.n_nodes = 1 params.node_id = 0 params.local_rank = 0 params.global_rank = 0 params.world_size = 1 params.n_gpu_per_node = 1 params.multi_gpu = False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode params.is_master = params.node_id == 0 and params.local_rank == 0 params.multi_node = params.n_nodes > 1 # summary PREFIX = f"--- Global rank: {params.global_rank} - " logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes) logger.info(PREFIX + "Node ID : %i" % params.node_id) logger.info(PREFIX + "Local rank : %i" % params.local_rank) logger.info(PREFIX + "World size : %i" % params.world_size) logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node) logger.info(PREFIX + "Master : %s" % str(params.is_master)) logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node)) logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu)) logger.info(PREFIX + "Hostname : %s" % socket.gethostname()) # set GPU device torch.cuda.set_device(params.local_rank) # initialize multi-GPU if params.multi_gpu: logger.info("Initializing PyTorch distributed") torch.distributed.init_process_group( 
init_method="env://", backend="nccl", ) def set_seed(args): """ Set the random seed. """ np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed)
transformers/examples/research_projects/distillation/utils.py/0
{ "file_path": "transformers/examples/research_projects/distillation/utils.py", "repo_id": "transformers", "token_count": 1772 }
297
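A small sketch of how `init_gpu_params` and `set_seed` above behave for a CPU-only run. The `Namespace` stands in for the argparse arguments of `train.py`; with `n_gpu >= 1` the function instead asserts CUDA availability and, for multi-GPU, reads the `WORLD_SIZE` / `RANK` / `N_GPU_NODE` environment variables set by the launcher.

```python
# Hypothetical driver for the utilities above; assumes the file is importable as `utils`.
from argparse import Namespace

from utils import init_gpu_params, set_seed

params = Namespace(n_gpu=0, local_rank=-1, seed=56)
init_gpu_params(params)  # n_gpu <= 0 path: local_rank=0, is_master=True, multi_gpu=False
set_seed(params)         # seeds numpy and torch (CUDA seeding is skipped when n_gpu == 0)
print(params.is_master, params.multi_gpu, params.local_rank)
```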
Author: [@vasudevgupta7](https://github.com/thevasudevgupta/) ## Intro In this project, we fine-tuned [**BigBird**](https://arxiv.org/abs/2007.14062) on the [**natural-questions**](https://huggingface.co/datasets/natural_questions) dataset for the **question-answering** task on long documents. **BigBird** is a **sparse-attention-based transformer** which extends Transformer-based models, such as BERT, to much **longer sequences**. Read more about BigBird at https://huggingface.co/blog/big-bird ## Fine-tuning **Setup** You need to install JAX yourself by following the official docs ([see here](https://github.com/google/jax#installation)). The other requirements for this project can be installed by running the following command: ```shell pip3 install -qr requirements.txt ``` **Download & prepare dataset** The Natural Questions corpus contains questions from real users, and it requires QA systems to read and comprehend an entire Wikipedia article that may or may not contain the answer to the question. This corpus takes ~100 GB on disk. We used HuggingFace datasets to download & process the dataset. ```shell # just run the following command python3 prepare_natural_questions.py # this will download the whole dataset from the HuggingFace Hub & make it ready for training # this script takes ~3 hours to process the dataset ``` **Launch Training** We trained on a Cloud TPU v3-8. Each epoch took around 4.5 hours and the model converged in just 2 epochs. You can see the complete training arguments in [this script](bigbird_flax.py). ```shell # just run the following command python3 train.py # in case you want to try hyperparameter tuning, you can run a wandb sweep wandb sweep --project=bigbird sweep_flax.yaml wandb agent <agent-id-obtained-by-above-CMD> ``` ## Evaluation Our evaluation script differs from the original one: for simplicity, we evaluate sequences of length up to 4096. We obtained an **EM score of ~55.2** with it. ```shell # download the validation dataset first mkdir natural-questions-validation wget https://huggingface.co/datasets/vasudevgupta/natural-questions-validation/resolve/main/natural_questions-validation.arrow -P natural-questions-validation wget https://huggingface.co/datasets/vasudevgupta/natural-questions-validation/resolve/main/dataset_info.json -P natural-questions-validation wget https://huggingface.co/datasets/vasudevgupta/natural-questions-validation/resolve/main/state.json -P natural-questions-validation # then simply run the following command python3 evaluate.py ``` You can find our checkpoint on the HuggingFace Hub ([see this](https://huggingface.co/vasudevgupta/flax-bigbird-natural-questions)). In case you are interested in PyTorch BigBird fine-tuning, you can refer to [this repository](https://github.com/thevasudevgupta/bigbird).
transformers/examples/research_projects/jax-projects/big_bird/README.md/0
{ "file_path": "transformers/examples/research_projects/jax-projects/big_bird/README.md", "repo_id": "transformers", "token_count": 824 }
298
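A hedged inference sketch with the released checkpoint. The project's fine-tuned model class (`FlaxBigBirdForNaturalQuestions` in `bigbird_flax.py`) adds an answer-type head on top of the stock QA model, so loading the weights into `FlaxBigBirdForQuestionAnswering` is only an approximation and may warn about unused parameters; the tokenizer is taken from the base BigBird checkpoint, and the question/context strings are made up.

```python
# Approximate inference with the released weights; see bigbird_flax.py for the
# exact model class used during training.
from transformers import BigBirdTokenizerFast, FlaxBigBirdForQuestionAnswering

tokenizer = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
model = FlaxBigBirdForQuestionAnswering.from_pretrained(
    "vasudevgupta/flax-bigbird-natural-questions"
)

question = "When was the natural questions dataset released?"
context = "Natural Questions is a question answering dataset released by Google in 2019."
inputs = tokenizer(question, context, return_tensors="jax")
outputs = model(**inputs)
print(outputs.start_logits.shape, outputs.end_logits.shape)
```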
#!/usr/bin/env python # coding=utf-8 # Copyright 2021 The HuggingFace Team All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Pre-training/Fine-tuning the GPTNeo model for causal language modeling on a text file or a dataset using model parallelism. """ import logging import math import os import sys import time from dataclasses import dataclass, field from itertools import chain from pathlib import Path from typing import Callable, Optional import datasets import jax import jax.numpy as jnp import numpy as np import optax from datasets import Dataset, load_dataset from flax.core.frozen_dict import freeze, unfreeze from flax.training.common_utils import onehot, stack_forest from jax.experimental.maps import mesh from jax.experimental.pjit import pjit from partitions import set_partitions from tqdm import tqdm import transformers from transformers import ( CONFIG_MAPPING, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING, AutoConfig, AutoTokenizer, FlaxAutoModelForCausalLM, HfArgumentParser, TrainingArguments, is_tensorboard_available, ) from transformers.testing_utils import CaptureLogger logger = logging.getLogger(__name__) MODEL_CONFIG_CLASSES = list(FLAX_MODEL_FOR_CAUSAL_LM_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. """ model_name_or_path: Optional[str] = field( default=None, metadata={ "help": ( "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch." ) }, ) model_type: Optional[str] = field( default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) dtype: Optional[str] = field( default="float32", metadata={ "help": ( "Floating-point format in which the model weights should be initialized and trained. Choose one of" " `[float32, float16, bfloat16]`." ) }, ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. 
""" dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) validation_split_percentage: Optional[int] = field( default=5, metadata={ "help": "The percentage of the train set used as validation set in case there's no validation split" }, ) block_size: Optional[int] = field( default=None, metadata={ "help": ( "Optional input sequence length after tokenization. " "The training dataset will be truncated in block of this size for training. " "Default to the model max input length for single sentence inputs (take into account special tokens)." ) }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) def __post_init__(self): if self.dataset_name is None and self.train_file is None and self.validation_file is None: raise ValueError("Need either a dataset name or a training/validation file.") else: if self.train_file is not None: extension = self.train_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." def data_loader(rng: jax.random.PRNGKey, dataset: Dataset, batch_size: int, shuffle: bool = False): """ Returns batches of size `batch_size` from truncated `dataset`, sharded over all local devices. Shuffle batches if `shuffle` is `True`. """ steps_per_epoch = len(dataset) // batch_size if shuffle: batch_idx = jax.random.permutation(rng, len(dataset)) else: batch_idx = jnp.arange(len(dataset)) batch_idx = batch_idx[: steps_per_epoch * batch_size] # Skip incomplete batch. 
batch_idx = batch_idx.reshape((steps_per_epoch, batch_size)) for idx in batch_idx: batch = dataset[idx] batch = {k: jnp.array(v) for k, v in batch.items()} yield batch def write_train_metric(summary_writer, train_metrics, train_time, step): summary_writer.scalar("train_time", train_time, step) train_metrics = stack_forest(train_metrics) for key, vals in train_metrics.items(): tag = f"train_{key}" for i, val in enumerate(vals): summary_writer.scalar(tag, val, step - len(vals) + i + 1) def write_eval_metric(summary_writer, eval_metrics, step): for metric_name, value in eval_metrics.items(): summary_writer.scalar(f"eval_{metric_name}", value, step) def create_learning_rate_fn( train_ds_size: int, train_batch_size: int, num_train_epochs: int, num_warmup_steps: int, learning_rate: float ) -> Callable[[int], jnp.ndarray]: """Returns a linear warmup, linear_decay learning rate function.""" steps_per_epoch = train_ds_size // train_batch_size num_train_steps = steps_per_epoch * num_train_epochs warmup_fn = optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps) decay_fn = optax.linear_schedule( init_value=learning_rate, end_value=0, transition_steps=num_train_steps - num_warmup_steps ) schedule_fn = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[num_warmup_steps]) return schedule_fn def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR) if jax.process_index() == 0: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # Set the verbosity to info of the Transformers logger (on main process only): logger.info(f"Training/evaluation parameters {training_args}") # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). 
if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. dataset = load_dataset( data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, keep_in_memory=False ) if "validation" not in dataset.keys(): dataset["validation"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f"train[:{data_args.validation_split_percentage}%]", cache_dir=model_args.cache_dir, ) dataset["train"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f"train[{data_args.validation_split_percentage}%:]", cache_dir=model_args.cache_dir, ) else: data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file extension = data_args.train_file.split(".")[-1] if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = data_args.validation_file.split(".")[-1] if extension == "txt": extension = "text" dataset = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets. # Load pretrained config and tokenizer if model_args.config_name: config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir) elif model_args.model_name_or_path: config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir) else: config = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") if model_args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer ) elif model_args.model_name_or_path: tokenizer = AutoTokenizer.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer ) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script. " "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) if training_args.do_train: column_names = dataset["train"].column_names else: column_names = dataset["validation"].column_names text_column_name = "text" if "text" in column_names else column_names[0] # since this will be pickled to avoid _LazyModule error in Hasher force logger loading before tokenize_function tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base") def tokenize_function(examples): with CaptureLogger(tok_logger) as cl: output = tokenizer(examples[text_column_name]) # clm input could be much much longer than block_size if "Token indices sequence length is longer than the" in cl.out: tok_logger.warning( "^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits" " before being passed to the model." ) return output tokenized_datasets = dataset.map( tokenize_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, ) if data_args.block_size is None: block_size = tokenizer.model_max_length if block_size > config.max_position_embeddings: logger.warning( f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). " f"Using block_size={min(1024, config.max_position_embeddings)} instead. 
You can change that default value by passing --block_size xxx." ) block_size = min(1024, config.max_position_embeddings) else: if data_args.block_size > tokenizer.model_max_length: logger.warning( f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model " f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}." ) block_size = min(data_args.block_size, tokenizer.model_max_length) # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size. def group_texts(examples): # Concatenate all texts. concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()} total_length = len(concatenated_examples[list(examples.keys())[0]]) # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can # customize this part to your needs. if total_length >= block_size: total_length = (total_length // block_size) * block_size # Split by chunks of max_len. result = { k: [t[i : i + block_size] for i in range(0, total_length, block_size)] for k, t in concatenated_examples.items() } result["labels"] = result["input_ids"].copy() return result # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower # to preprocess. # # To speed up this part, we use multiprocessing. See the documentation of the map method for more information: # https://huggingface.co/docs/datasets/process#map lm_datasets = tokenized_datasets.map( group_texts, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, ) if training_args.do_train: if "train" not in tokenized_datasets: raise ValueError("--do_train requires a train dataset") train_dataset = lm_datasets["train"] if data_args.max_train_samples is not None: max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) if training_args.do_eval: if "validation" not in tokenized_datasets: raise ValueError("--do_eval requires a validation dataset") eval_dataset = lm_datasets["validation"] if data_args.max_eval_samples is not None: max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) # Enable tensorboard only on the master node has_tensorboard = is_tensorboard_available() if has_tensorboard and jax.process_index() == 0: try: from flax.metrics.tensorboard import SummaryWriter summary_writer = SummaryWriter(log_dir=Path(training_args.output_dir)) except ImportError as ie: has_tensorboard = False logger.warning( f"Unable to display metrics through TensorBoard because some package are not installed: {ie}" ) else: logger.warning( "Unable to display metrics through TensorBoard because the package is not installed: " "Please run pip install tensorboard to enable." 
) # Initialize our training rng = jax.random.PRNGKey(training_args.seed) rng, dropout_rng = jax.random.split(rng) # Store some constant num_epochs = int(training_args.num_train_epochs) train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count() eval_batch_size = int(training_args.per_device_eval_batch_size) * jax.device_count() steps_per_epoch = len(train_dataset) // train_batch_size total_train_steps = steps_per_epoch * num_epochs # TODO: weights should be initialized in pjitted fun, this won't work for REALLY large models # TODO: when loading from pre-trained model we need to make sure the vocab is divisible by num_partitions # GPT2's vocab is odd, we need to resize it for fine-tuning model = FlaxAutoModelForCausalLM.from_pretrained( model_args.model_name_or_path, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype) ) # Create learning rate schedule linear_decay_lr_schedule_fn = create_learning_rate_fn( len(train_dataset), train_batch_size, training_args.num_train_epochs, training_args.warmup_steps, training_args.learning_rate, ) optimizer = optax.adamw( learning_rate=linear_decay_lr_schedule_fn, b1=training_args.adam_beta1, b2=training_args.adam_beta2, eps=training_args.adam_epsilon, weight_decay=training_args.weight_decay, ) def get_initial_state(params): state = optimizer.init(params) return tuple(state), params # Get PartitionSpec for model params param_spec = set_partitions(unfreeze(model.params)) # Get the PyTree for opt_state, we don't actually initialize the opt_state yet. params_shapes = jax.tree_util.tree_map(lambda x: x.shape, model.params) state_shapes = jax.eval_shape(get_initial_state, params_shapes) # get PartitionSpec for opt_state, this is very specific to adamw # TODO: optax returns different state for different optimizers, how can we handle this generically ? 
# or maybe we don't since in our examples we just use adamw or adafactor def get_opt_spec(x): if isinstance(x, dict): return param_spec return None opt_state_spec, param_spec = jax.tree_util.tree_map( get_opt_spec, state_shapes, is_leaf=lambda x: isinstance(x, (dict, optax.EmptyState)) ) # pjit the get_initial_state function to shard params and init # optimizer state in sharded way p_get_initial_state = pjit( get_initial_state, in_axis_resources=None, out_axis_resources=(opt_state_spec, param_spec), ) # hack: move the inital params to CPU to free up device memory # TODO: allow loading weights on CPU in pre-trained model model.params = jax.tree_util.tree_map(lambda x: np.asarray(x), model.params) # mesh defination mesh_devices = np.array(jax.devices()).reshape(1, jax.local_device_count()) # actually initialize the opt_state with mesh(mesh_devices, ("dp", "mp")): opt_state, params = p_get_initial_state(freeze(model.params)) # cross-entropy with z loss def loss_fn(logits, labels, z_loss=0): shift_logits = logits[..., :-1, :] shift_labels = labels[..., 1:] shift_labels = onehot(shift_labels, shift_logits.shape[-1]) shift_logits = shift_logits - jax.lax.stop_gradient(shift_logits.max(axis=-1, keepdims=True)) log_z = jnp.log(jnp.sum(jnp.exp(shift_logits), axis=-1, keepdims=True)) log_softmax = shift_logits - log_z loss = -jnp.sum(shift_labels * log_softmax, axis=-1) loss += (1e-4 * jnp.square(log_z.squeeze(-1))) * z_loss return loss.mean() # Define gradient update step fn # TODO: try to use TrainState instead of passing params and opt_state individually def train_step(params, opt_state, dropout_rng, batch, step): dropout_rng, new_dropout_rng = jax.random.split(dropout_rng) def compute_loss(params): labels = batch.pop("labels") logits = model(**batch, params=params, dropout_rng=dropout_rng, train=True)[0] loss = loss_fn(logits, labels, z_loss=1.0) return loss grad_fn = jax.value_and_grad(compute_loss) loss, grads = grad_fn(params) updates, new_opt_state = optimizer.update(grads, opt_state, params) new_params = optax.apply_updates(params, updates) metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(step)} return new_params, tuple(new_opt_state), new_dropout_rng, metrics, step + 1 # Define eval fn def eval_step(input_ids, labels, params): logits = model(input_ids=input_ids, params=params, train=False)[0] loss = loss_fn(logits, labels) # metrics return {"loss": loss} p_train_step = pjit( train_step, in_axis_resources=(param_spec, opt_state_spec, None, None, None), out_axis_resources=(param_spec, opt_state_spec, None, None, None), donate_argnums=(0, 1), ) p_eval_step = pjit( eval_step, in_axis_resources=(None, None, param_spec), out_axis_resources=None, ) logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {num_epochs}") logger.info(f" Instantaneous batch size per device = {training_args.per_device_train_batch_size}") logger.info(f" Total train batch size (w. parallel & distributed) = {train_batch_size}") logger.info(f" Total optimization steps = {total_train_steps}") train_time = 0 train_metrics = [] epochs = tqdm(range(num_epochs), desc=f"Epoch ... 
(1/{num_epochs})", position=0) global_step = 0 # we are not doing 2D parallelism (yet!), this just does model parallelism with mesh(mesh_devices, ("dp", "mp")): for _ in epochs: # ======================== Training ================================ train_start = time.time() # Create sampling rng rng, input_rng = jax.random.split(rng) # Generate an epoch by shuffling sampling indices from the train dataset train_metrics = [] train_loader = data_loader(input_rng, train_dataset, train_batch_size, shuffle=True) steps_per_epoch = len(train_dataset) // train_batch_size # train for _ in tqdm(range(steps_per_epoch), desc="Training...", position=1, leave=False): batch = next(train_loader) params, opt_state, dropout_rng, train_metric, global_step = p_train_step( params, opt_state, dropout_rng, batch, global_step, ) train_metrics.append(train_metric) cur_step = global_step if cur_step % training_args.logging_steps == 0 and cur_step > 0: # Save metrics train_time += time.time() - train_start if has_tensorboard and jax.process_index() == 0: write_train_metric(summary_writer, train_metrics, train_time, cur_step) epochs.write( f"Step... ({cur_step} | Loss: {train_metric['loss']}, Learning Rate:" f" {train_metric['learning_rate']})" ) train_metrics = [] if cur_step % training_args.eval_steps == 0 and cur_step > 0: # ======================== Evaluating ============================== eval_metrics = [] eval_loader = data_loader(input_rng, eval_dataset, eval_batch_size) eval_steps = len(eval_dataset) // eval_batch_size for _ in tqdm(range(eval_steps), desc="Evaluating...", position=2, leave=False): batch = next(eval_loader) metrics = p_eval_step(batch["input_ids"], batch["labels"], params) eval_metrics.append(metrics) # normalize eval metrics eval_metrics = stack_forest(eval_metrics) eval_metrics = jax.tree_util.tree_map(jnp.mean, eval_metrics) try: eval_metrics["perplexity"] = math.exp(eval_metrics["loss"]) except OverflowError: eval_metrics["perplexity"] = float("inf") logger.info( f"Step... ({cur_step} | Eval loss: {eval_metrics['loss']} | Eval Perplexity:" f" {eval_metrics['perplexity']}" ) if cur_step % training_args.save_steps == 0 and cur_step > 0: # save checkpoint after each epoch and push checkpoint to the hub if jax.process_index() == 0: params = jax.device_get(params) model.save_pretrained( training_args.output_dir, params=params, push_to_hub=training_args.push_to_hub, commit_message=f"Saving weights and logs of step {cur_step}", ) if __name__ == "__main__": main()
transformers/examples/research_projects/jax-projects/model_parallel/run_clm_mp.py/0
{ "file_path": "transformers/examples/research_projects/jax-projects/model_parallel/run_clm_mp.py", "repo_id": "transformers", "token_count": 11794 }
299
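The most script-specific piece of `run_clm_mp.py` above is the z-loss regularized cross-entropy. The snippet below exercises the same `loss_fn` on random tensors so its shapes and the `1e-4 * log_z**2` penalty can be inspected outside the pjit-ed training step; the toy vocabulary size and shapes are arbitrary.

```python
# Stand-alone check of the z-loss cross-entropy defined in run_clm_mp.py.
import jax
import jax.numpy as jnp
from flax.training.common_utils import onehot

def loss_fn(logits, labels, z_loss=0.0):
    shift_logits = logits[..., :-1, :]
    shift_labels = onehot(labels[..., 1:], shift_logits.shape[-1])
    shift_logits = shift_logits - jax.lax.stop_gradient(shift_logits.max(axis=-1, keepdims=True))
    log_z = jnp.log(jnp.sum(jnp.exp(shift_logits), axis=-1, keepdims=True))
    log_softmax = shift_logits - log_z
    loss = -jnp.sum(shift_labels * log_softmax, axis=-1)
    loss += (1e-4 * jnp.square(log_z.squeeze(-1))) * z_loss  # z-loss keeps log Z small
    return loss.mean()

key_logits, key_labels = jax.random.split(jax.random.PRNGKey(0))
logits = jax.random.normal(key_logits, (2, 8, 256))     # (batch, seq_len, vocab)
labels = jax.random.randint(key_labels, (2, 8), 0, 256)
print(loss_fn(logits, labels, z_loss=1.0))
```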
""" coding=utf-8 Copyright 2018, Antonio Mendoza Hao Tan, Mohit Bansal Adapted From Facebook Inc, Detectron2 && Huggingface Co. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.import copy """ import itertools import math import os from abc import ABCMeta, abstractmethod from collections import OrderedDict, namedtuple from typing import Dict, List, Tuple import numpy as np import torch from torch import nn from torch.nn.modules.batchnorm import BatchNorm2d from torchvision.ops import RoIPool from torchvision.ops.boxes import batched_nms, nms from utils import WEIGHTS_NAME, Config, cached_path, hf_bucket_url, is_remote_url, load_checkpoint # other: def norm_box(boxes, raw_sizes): if not isinstance(boxes, torch.Tensor): normalized_boxes = boxes.copy() else: normalized_boxes = boxes.clone() normalized_boxes[:, :, (0, 2)] /= raw_sizes[:, 1] normalized_boxes[:, :, (1, 3)] /= raw_sizes[:, 0] return normalized_boxes def pad_list_tensors( list_tensors, preds_per_image, max_detections=None, return_tensors=None, padding=None, pad_value=0, location=None, ): """ location will always be cpu for np tensors """ if location is None: location = "cpu" assert return_tensors in {"pt", "np", None} assert padding in {"max_detections", "max_batch", None} new = [] if padding is None: if return_tensors is None: return list_tensors elif return_tensors == "pt": if not isinstance(list_tensors, torch.Tensor): return torch.stack(list_tensors).to(location) else: return list_tensors.to(location) else: if not isinstance(list_tensors, list): return np.array(list_tensors.to(location)) else: return list_tensors.to(location) if padding == "max_detections": assert max_detections is not None, "specify max number of detections per batch" elif padding == "max_batch": max_detections = max(preds_per_image) for i in range(len(list_tensors)): too_small = False tensor_i = list_tensors.pop(0) if tensor_i.ndim < 2: too_small = True tensor_i = tensor_i.unsqueeze(-1) assert isinstance(tensor_i, torch.Tensor) tensor_i = nn.functional.pad( input=tensor_i, pad=(0, 0, 0, max_detections - preds_per_image[i]), mode="constant", value=pad_value, ) if too_small: tensor_i = tensor_i.squeeze(-1) if return_tensors is None: if location == "cpu": tensor_i = tensor_i.cpu() tensor_i = tensor_i.tolist() if return_tensors == "np": if location == "cpu": tensor_i = tensor_i.cpu() tensor_i = tensor_i.numpy() else: if location == "cpu": tensor_i = tensor_i.cpu() new.append(tensor_i) if return_tensors == "np": return np.stack(new, axis=0) elif return_tensors == "pt" and not isinstance(new, torch.Tensor): return torch.stack(new, dim=0) else: return list_tensors def do_nms(boxes, scores, image_shape, score_thresh, nms_thresh, mind, maxd): scores = scores[:, :-1] num_bbox_reg_classes = boxes.shape[1] // 4 # Convert to Boxes to use the `clip` function ... 
boxes = boxes.reshape(-1, 4) _clip_box(boxes, image_shape) boxes = boxes.view(-1, num_bbox_reg_classes, 4) # R x C x 4 # Select max scores max_scores, max_classes = scores.max(1) # R x C --> R num_objs = boxes.size(0) boxes = boxes.view(-1, 4) idxs = torch.arange(num_objs).to(boxes.device) * num_bbox_reg_classes + max_classes max_boxes = boxes[idxs] # Select max boxes according to the max scores. # Apply NMS keep = nms(max_boxes, max_scores, nms_thresh) keep = keep[:maxd] if keep.shape[-1] >= mind and keep.shape[-1] <= maxd: max_boxes, max_scores = max_boxes[keep], max_scores[keep] classes = max_classes[keep] return max_boxes, max_scores, classes, keep else: return None # Helper Functions def _clip_box(tensor, box_size: Tuple[int, int]): assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!" h, w = box_size tensor[:, 0].clamp_(min=0, max=w) tensor[:, 1].clamp_(min=0, max=h) tensor[:, 2].clamp_(min=0, max=w) tensor[:, 3].clamp_(min=0, max=h) def _nonempty_boxes(box, threshold: float = 0.0) -> torch.Tensor: widths = box[:, 2] - box[:, 0] heights = box[:, 3] - box[:, 1] keep = (widths > threshold) & (heights > threshold) return keep def get_norm(norm, out_channels): if isinstance(norm, str): if len(norm) == 0: return None norm = { "BN": BatchNorm2d, "GN": lambda channels: nn.GroupNorm(32, channels), "nnSyncBN": nn.SyncBatchNorm, # keep for debugging "": lambda x: x, }[norm] return norm(out_channels) def _create_grid_offsets(size: List[int], stride: int, offset: float, device): grid_height, grid_width = size shifts_x = torch.arange( offset * stride, grid_width * stride, step=stride, dtype=torch.float32, device=device, ) shifts_y = torch.arange( offset * stride, grid_height * stride, step=stride, dtype=torch.float32, device=device, ) shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x) shift_x = shift_x.reshape(-1) shift_y = shift_y.reshape(-1) return shift_x, shift_y def build_backbone(cfg): input_shape = ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN)) norm = cfg.RESNETS.NORM stem = BasicStem( in_channels=input_shape.channels, out_channels=cfg.RESNETS.STEM_OUT_CHANNELS, norm=norm, caffe_maxpool=cfg.MODEL.MAX_POOL, ) freeze_at = cfg.BACKBONE.FREEZE_AT if freeze_at >= 1: for p in stem.parameters(): p.requires_grad = False out_features = cfg.RESNETS.OUT_FEATURES depth = cfg.RESNETS.DEPTH num_groups = cfg.RESNETS.NUM_GROUPS width_per_group = cfg.RESNETS.WIDTH_PER_GROUP bottleneck_channels = num_groups * width_per_group in_channels = cfg.RESNETS.STEM_OUT_CHANNELS out_channels = cfg.RESNETS.RES2_OUT_CHANNELS stride_in_1x1 = cfg.RESNETS.STRIDE_IN_1X1 res5_dilation = cfg.RESNETS.RES5_DILATION assert res5_dilation in {1, 2}, "res5_dilation cannot be {}.".format(res5_dilation) num_blocks_per_stage = {50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3]}[depth] stages = [] out_stage_idx = [{"res2": 2, "res3": 3, "res4": 4, "res5": 5}[f] for f in out_features] max_stage_idx = max(out_stage_idx) for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)): dilation = res5_dilation if stage_idx == 5 else 1 first_stride = 1 if idx == 0 or (stage_idx == 5 and dilation == 2) else 2 stage_kargs = { "num_blocks": num_blocks_per_stage[idx], "first_stride": first_stride, "in_channels": in_channels, "bottleneck_channels": bottleneck_channels, "out_channels": out_channels, "num_groups": num_groups, "norm": norm, "stride_in_1x1": stride_in_1x1, "dilation": dilation, } stage_kargs["block_class"] = BottleneckBlock blocks = ResNet.make_stage(**stage_kargs) in_channels = out_channels out_channels 
*= 2 bottleneck_channels *= 2 if freeze_at >= stage_idx: for block in blocks: block.freeze() stages.append(blocks) return ResNet(stem, stages, out_features=out_features) def find_top_rpn_proposals( proposals, pred_objectness_logits, images, image_sizes, nms_thresh, pre_nms_topk, post_nms_topk, min_box_side_len, training, ): """Args: proposals (list[Tensor]): (L, N, Hi*Wi*A, 4). pred_objectness_logits: tensors of length L. nms_thresh (float): IoU threshold to use for NMS pre_nms_topk (int): before nms post_nms_topk (int): after nms min_box_side_len (float): minimum proposal box side training (bool): True if proposals are to be used in training, Returns: results (List[Dict]): stores post_nms_topk object proposals for image i. """ num_images = len(images) device = proposals[0].device # 1. Select top-k anchor for every level and every image topk_scores = [] # #lvl Tensor, each of shape N x topk topk_proposals = [] level_ids = [] # #lvl Tensor, each of shape (topk,) batch_idx = torch.arange(num_images, device=device) for level_id, proposals_i, logits_i in zip(itertools.count(), proposals, pred_objectness_logits): Hi_Wi_A = logits_i.shape[1] num_proposals_i = min(pre_nms_topk, Hi_Wi_A) # sort is faster than topk (https://github.com/pytorch/pytorch/issues/22812) # topk_scores_i, topk_idx = logits_i.topk(num_proposals_i, dim=1) logits_i, idx = logits_i.sort(descending=True, dim=1) topk_scores_i = logits_i[batch_idx, :num_proposals_i] topk_idx = idx[batch_idx, :num_proposals_i] # each is N x topk topk_proposals_i = proposals_i[batch_idx[:, None], topk_idx] # N x topk x 4 topk_proposals.append(topk_proposals_i) topk_scores.append(topk_scores_i) level_ids.append(torch.full((num_proposals_i,), level_id, dtype=torch.int64, device=device)) # 2. Concat all levels together topk_scores = torch.cat(topk_scores, dim=1) topk_proposals = torch.cat(topk_proposals, dim=1) level_ids = torch.cat(level_ids, dim=0) # if I change to batched_nms, I wonder if this will make a difference # 3. For each image, run a per-level NMS, and choose topk results. results = [] for n, image_size in enumerate(image_sizes): boxes = topk_proposals[n] scores_per_img = topk_scores[n] # I will have to take a look at the boxes clip method _clip_box(boxes, image_size) # filter empty boxes keep = _nonempty_boxes(boxes, threshold=min_box_side_len) lvl = level_ids if keep.sum().item() != len(boxes): boxes, scores_per_img, lvl = ( boxes[keep], scores_per_img[keep], level_ids[keep], ) keep = batched_nms(boxes, scores_per_img, lvl, nms_thresh) keep = keep[:post_nms_topk] res = (boxes[keep], scores_per_img[keep]) results.append(res) # I wonder if it would be possible for me to pad all these things. return results def subsample_labels(labels, num_samples, positive_fraction, bg_label): """ Returns: pos_idx, neg_idx (Tensor): 1D vector of indices. The total length of both is `num_samples` or fewer. 
""" positive = torch.nonzero((labels != -1) & (labels != bg_label)).squeeze(1) negative = torch.nonzero(labels == bg_label).squeeze(1) num_pos = int(num_samples * positive_fraction) # protect against not enough positive examples num_pos = min(positive.numel(), num_pos) num_neg = num_samples - num_pos # protect against not enough negative examples num_neg = min(negative.numel(), num_neg) # randomly select positive and negative examples perm1 = torch.randperm(positive.numel(), device=positive.device)[:num_pos] perm2 = torch.randperm(negative.numel(), device=negative.device)[:num_neg] pos_idx = positive[perm1] neg_idx = negative[perm2] return pos_idx, neg_idx def add_ground_truth_to_proposals(gt_boxes, proposals): raise NotImplementedError() def add_ground_truth_to_proposals_single_image(gt_boxes, proposals): raise NotImplementedError() def _fmt_box_list(box_tensor, batch_index: int): repeated_index = torch.full( (len(box_tensor), 1), batch_index, dtype=box_tensor.dtype, device=box_tensor.device, ) return torch.cat((repeated_index, box_tensor), dim=1) def convert_boxes_to_pooler_format(box_lists: List[torch.Tensor]): pooler_fmt_boxes = torch.cat( [_fmt_box_list(box_list, i) for i, box_list in enumerate(box_lists)], dim=0, ) return pooler_fmt_boxes def assign_boxes_to_levels( box_lists: List[torch.Tensor], min_level: int, max_level: int, canonical_box_size: int, canonical_level: int, ): box_sizes = torch.sqrt(torch.cat([boxes.area() for boxes in box_lists])) # Eqn.(1) in FPN paper level_assignments = torch.floor(canonical_level + torch.log2(box_sizes / canonical_box_size + 1e-8)) # clamp level to (min, max), in case the box size is too large or too small # for the available feature maps level_assignments = torch.clamp(level_assignments, min=min_level, max=max_level) return level_assignments.to(torch.int64) - min_level # Helper Classes class _NewEmptyTensorOp(torch.autograd.Function): @staticmethod def forward(ctx, x, new_shape): ctx.shape = x.shape return x.new_empty(new_shape) @staticmethod def backward(ctx, grad): shape = ctx.shape return _NewEmptyTensorOp.apply(grad, shape), None class ShapeSpec(namedtuple("_ShapeSpec", ["channels", "height", "width", "stride"])): def __new__(cls, *, channels=None, height=None, width=None, stride=None): return super().__new__(cls, channels, height, width, stride) class Box2BoxTransform(object): """ This R-CNN transformation scales the box's width and height by exp(dw), exp(dh) and shifts a box's center by the offset (dx * width, dy * height). """ def __init__(self, weights: Tuple[float, float, float, float], scale_clamp: float = None): """ Args: weights (4-element tuple): Scaling factors that are applied to the (dx, dy, dw, dh) deltas. In Fast R-CNN, these were originally set such that the deltas have unit variance; now they are treated as hyperparameters of the system. scale_clamp (float): When predicting deltas, the predicted box scaling factors (dw and dh) are clamped such that they are <= scale_clamp. """ self.weights = weights if scale_clamp is not None: self.scale_clamp = scale_clamp else: """ Value for clamping large dw and dh predictions. The heuristic is that we clamp such that dw and dh are no larger than what would transform a 16px box into a 1000px box (based on a small anchor, 16px, and a typical image size, 1000px). """ self.scale_clamp = math.log(1000.0 / 16) def get_deltas(self, src_boxes, target_boxes): """ Get box regression transformation deltas (dx, dy, dw, dh) that can be used to transform the `src_boxes` into the `target_boxes`. 
That is, the relation ``target_boxes == self.apply_deltas(deltas, src_boxes)`` is true (unless any delta is too large and is clamped). Args: src_boxes (Tensor): source boxes, e.g., object proposals target_boxes (Tensor): target of the transformation, e.g., ground-truth boxes. """ assert isinstance(src_boxes, torch.Tensor), type(src_boxes) assert isinstance(target_boxes, torch.Tensor), type(target_boxes) src_widths = src_boxes[:, 2] - src_boxes[:, 0] src_heights = src_boxes[:, 3] - src_boxes[:, 1] src_ctr_x = src_boxes[:, 0] + 0.5 * src_widths src_ctr_y = src_boxes[:, 1] + 0.5 * src_heights target_widths = target_boxes[:, 2] - target_boxes[:, 0] target_heights = target_boxes[:, 3] - target_boxes[:, 1] target_ctr_x = target_boxes[:, 0] + 0.5 * target_widths target_ctr_y = target_boxes[:, 1] + 0.5 * target_heights wx, wy, ww, wh = self.weights dx = wx * (target_ctr_x - src_ctr_x) / src_widths dy = wy * (target_ctr_y - src_ctr_y) / src_heights dw = ww * torch.log(target_widths / src_widths) dh = wh * torch.log(target_heights / src_heights) deltas = torch.stack((dx, dy, dw, dh), dim=1) assert (src_widths > 0).all().item(), "Input boxes to Box2BoxTransform are not valid!" return deltas def apply_deltas(self, deltas, boxes): """ Apply transformation `deltas` (dx, dy, dw, dh) to `boxes`. Args: deltas (Tensor): transformation deltas of shape (N, k*4), where k >= 1. deltas[i] represents k potentially different class-specific box transformations for the single box boxes[i]. boxes (Tensor): boxes to transform, of shape (N, 4) """ boxes = boxes.to(deltas.dtype) widths = boxes[:, 2] - boxes[:, 0] heights = boxes[:, 3] - boxes[:, 1] ctr_x = boxes[:, 0] + 0.5 * widths ctr_y = boxes[:, 1] + 0.5 * heights wx, wy, ww, wh = self.weights dx = deltas[:, 0::4] / wx dy = deltas[:, 1::4] / wy dw = deltas[:, 2::4] / ww dh = deltas[:, 3::4] / wh # Prevent sending too large values into torch.exp() dw = torch.clamp(dw, max=self.scale_clamp) dh = torch.clamp(dh, max=self.scale_clamp) pred_ctr_x = dx * widths[:, None] + ctr_x[:, None] pred_ctr_y = dy * heights[:, None] + ctr_y[:, None] pred_w = torch.exp(dw) * widths[:, None] pred_h = torch.exp(dh) * heights[:, None] pred_boxes = torch.zeros_like(deltas) pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w # x1 pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h # y1 pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w # x2 pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h # y2 return pred_boxes class Matcher(object): """ This class assigns to each predicted "element" (e.g., a box) a ground-truth element. Each predicted element will have exactly zero or one matches; each ground-truth element may be matched to zero or more predicted elements. The matching is determined by the MxN match_quality_matrix, that characterizes how well each (ground-truth, prediction)-pair match each other. For example, if the elements are boxes, this matrix may contain box intersection-over-union overlap values. The matcher returns (a) a vector of length N containing the index of the ground-truth element m in [0, M) that matches to prediction n in [0, N). (b) a vector of length N containing the labels for each prediction. """ def __init__( self, thresholds: List[float], labels: List[int], allow_low_quality_matches: bool = False, ): """ Args: thresholds (list): a list of thresholds used to stratify predictions into levels. labels (list): a list of values to label predictions belonging at each level. A label can be one of {-1, 0, 1} signifying {ignore, negative class, positive class}, respectively. 
allow_low_quality_matches (bool): if True, produce additional matches or predictions with maximum match quality lower than high_threshold. For example, thresholds = [0.3, 0.5] labels = [0, -1, 1] All predictions with iou < 0.3 will be marked with 0 and thus will be considered as false positives while training. All predictions with 0.3 <= iou < 0.5 will be marked with -1 and thus will be ignored. All predictions with 0.5 <= iou will be marked with 1 and thus will be considered as true positives. """ thresholds = thresholds[:] assert thresholds[0] > 0 thresholds.insert(0, -float("inf")) thresholds.append(float("inf")) assert all(low <= high for (low, high) in zip(thresholds[:-1], thresholds[1:])) assert all(label_i in [-1, 0, 1] for label_i in labels) assert len(labels) == len(thresholds) - 1 self.thresholds = thresholds self.labels = labels self.allow_low_quality_matches = allow_low_quality_matches def __call__(self, match_quality_matrix): """ Args: match_quality_matrix (Tensor[float]): an MxN tensor, containing the pairwise quality between M ground-truth elements and N predicted elements. All elements must be >= 0 (due to the us of `torch.nonzero` for selecting indices in :meth:`set_low_quality_matches_`). Returns: matches (Tensor[int64]): a vector of length N, where matches[i] is a matched ground-truth index in [0, M) match_labels (Tensor[int8]): a vector of length N, where pred_labels[i] indicates true or false positive or ignored """ assert match_quality_matrix.dim() == 2 if match_quality_matrix.numel() == 0: default_matches = match_quality_matrix.new_full((match_quality_matrix.size(1),), 0, dtype=torch.int64) # When no gt boxes exist, we define IOU = 0 and therefore set labels # to `self.labels[0]`, which usually defaults to background class 0 # To choose to ignore instead, # can make labels=[-1,0,-1,1] + set appropriate thresholds default_match_labels = match_quality_matrix.new_full( (match_quality_matrix.size(1),), self.labels[0], dtype=torch.int8 ) return default_matches, default_match_labels assert torch.all(match_quality_matrix >= 0) # match_quality_matrix is M (gt) x N (predicted) # Max over gt elements (dim 0) to find best gt candidate for each prediction matched_vals, matches = match_quality_matrix.max(dim=0) match_labels = matches.new_full(matches.size(), 1, dtype=torch.int8) for l, low, high in zip(self.labels, self.thresholds[:-1], self.thresholds[1:]): low_high = (matched_vals >= low) & (matched_vals < high) match_labels[low_high] = l if self.allow_low_quality_matches: self.set_low_quality_matches_(match_labels, match_quality_matrix) return matches, match_labels def set_low_quality_matches_(self, match_labels, match_quality_matrix): """ Produce additional matches for predictions that have only low-quality matches. Specifically, for each ground-truth G find the set of predictions that have maximum overlap with it (including ties); for each prediction in that set, if it is unmatched, then match it to the ground-truth G. This function implements the RPN assignment case (i) in Sec. 3.1.2 of Faster R-CNN. """ # For each gt, find the prediction with which it has highest quality highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1) # Find the highest quality match available, even if it is low, including ties. # Note that the matches qualities must be positive due to the use of # `torch.nonzero`. 
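        # Illustrative sketch (comment only, not executed): for a 2 (gt) x 3 (pred)
        # match_quality_matrix
        #     [[0.9, 0.2, 0.1],
        #      [0.1, 0.4, 0.4]]
        # highest_quality_foreach_gt is [0.9, 0.4]; the equality test below marks
        # predictions 0, 1 and 2 (ties included) as the best match of some gt and
        # forces their labels to 1, even if their best IoU fell below the matching
        # thresholds.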
of_quality_inds = match_quality_matrix == highest_quality_foreach_gt[:, None] if of_quality_inds.dim() == 0: (_, pred_inds_with_highest_quality) = of_quality_inds.unsqueeze(0).nonzero().unbind(1) else: (_, pred_inds_with_highest_quality) = of_quality_inds.nonzero().unbind(1) match_labels[pred_inds_with_highest_quality] = 1 class RPNOutputs(object): def __init__( self, box2box_transform, anchor_matcher, batch_size_per_image, positive_fraction, images, pred_objectness_logits, pred_anchor_deltas, anchors, boundary_threshold=0, gt_boxes=None, smooth_l1_beta=0.0, ): """ Args: box2box_transform (Box2BoxTransform): :class:`Box2BoxTransform` instance for anchor-proposal transformations. anchor_matcher (Matcher): :class:`Matcher` instance for matching anchors to ground-truth boxes; used to determine training labels. batch_size_per_image (int): number of proposals to sample when training positive_fraction (float): target fraction of sampled proposals that should be positive images (ImageList): :class:`ImageList` instance representing N input images pred_objectness_logits (list[Tensor]): A list of L elements. Element i is a tensor of shape (N, A, Hi, W) pred_anchor_deltas (list[Tensor]): A list of L elements. Element i is a tensor of shape (N, A*4, Hi, Wi) anchors (list[torch.Tensor]): nested list of boxes. anchors[i][j] at (n, l) stores anchor array for feature map l boundary_threshold (int): if >= 0, then anchors that extend beyond the image boundary by more than boundary_thresh are not used in training. gt_boxes (list[Boxes], optional): A list of N elements. smooth_l1_beta (float): The transition point between L1 and L2 lossn. When set to 0, the loss becomes L1. When +inf, it is ignored """ self.box2box_transform = box2box_transform self.anchor_matcher = anchor_matcher self.batch_size_per_image = batch_size_per_image self.positive_fraction = positive_fraction self.pred_objectness_logits = pred_objectness_logits self.pred_anchor_deltas = pred_anchor_deltas self.anchors = anchors self.gt_boxes = gt_boxes self.num_feature_maps = len(pred_objectness_logits) self.num_images = len(images) self.boundary_threshold = boundary_threshold self.smooth_l1_beta = smooth_l1_beta def _get_ground_truth(self): raise NotImplementedError() def predict_proposals(self): # pred_anchor_deltas: (L, N, ? Hi, Wi) # anchors:(N, L, -1, B) # here we loop over specific feature map, NOT images proposals = [] anchors = self.anchors.transpose(0, 1) for anchors_i, pred_anchor_deltas_i in zip(anchors, self.pred_anchor_deltas): B = anchors_i.size(-1) N, _, Hi, Wi = pred_anchor_deltas_i.shape anchors_i = anchors_i.flatten(start_dim=0, end_dim=1) pred_anchor_deltas_i = pred_anchor_deltas_i.view(N, -1, B, Hi, Wi).permute(0, 3, 4, 1, 2).reshape(-1, B) proposals_i = self.box2box_transform.apply_deltas(pred_anchor_deltas_i, anchors_i) # Append feature map proposals with shape (N, Hi*Wi*A, B) proposals.append(proposals_i.view(N, -1, B)) proposals = torch.stack(proposals) return proposals def predict_objectness_logits(self): """ Returns: pred_objectness_logits (list[Tensor]) -> (N, Hi*Wi*A). 
""" pred_objectness_logits = [ # Reshape: (N, A, Hi, Wi) -> (N, Hi, Wi, A) -> (N, Hi*Wi*A) score.permute(0, 2, 3, 1).reshape(self.num_images, -1) for score in self.pred_objectness_logits ] return pred_objectness_logits # Main Classes class Conv2d(nn.Conv2d): def __init__(self, *args, **kwargs): norm = kwargs.pop("norm", None) activation = kwargs.pop("activation", None) super().__init__(*args, **kwargs) self.norm = norm self.activation = activation def forward(self, x): if x.numel() == 0 and self.training: assert not isinstance(self.norm, nn.SyncBatchNorm) if x.numel() == 0: assert not isinstance(self.norm, nn.GroupNorm) output_shape = [ (i + 2 * p - (di * (k - 1) + 1)) // s + 1 for i, p, di, k, s in zip( x.shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride, ) ] output_shape = [x.shape[0], self.weight.shape[0]] + output_shape empty = _NewEmptyTensorOp.apply(x, output_shape) if self.training: _dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0 return empty + _dummy else: return empty x = super().forward(x) if self.norm is not None: x = self.norm(x) if self.activation is not None: x = self.activation(x) return x class LastLevelMaxPool(nn.Module): """ This module is used in the original FPN to generate a downsampled P6 feature from P5. """ def __init__(self): super().__init__() self.num_levels = 1 self.in_feature = "p5" def forward(self, x): return [nn.functional.max_pool2d(x, kernel_size=1, stride=2, padding=0)] class LastLevelP6P7(nn.Module): """ This module is used in RetinaNet to generate extra layers, P6 and P7 from C5 feature. """ def __init__(self, in_channels, out_channels): super().__init__() self.num_levels = 2 self.in_feature = "res5" self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1) self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1) def forward(self, c5): p6 = self.p6(c5) p7 = self.p7(nn.functional.relu(p6)) return [p6, p7] class BasicStem(nn.Module): def __init__(self, in_channels=3, out_channels=64, norm="BN", caffe_maxpool=False): super().__init__() self.conv1 = Conv2d( in_channels, out_channels, kernel_size=7, stride=2, padding=3, bias=False, norm=get_norm(norm, out_channels), ) self.caffe_maxpool = caffe_maxpool # use pad 1 instead of pad zero def forward(self, x): x = self.conv1(x) x = nn.functional.relu_(x) if self.caffe_maxpool: x = nn.functional.max_pool2d(x, kernel_size=3, stride=2, padding=0, ceil_mode=True) else: x = nn.functional.max_pool2d(x, kernel_size=3, stride=2, padding=1) return x @property def out_channels(self): return self.conv1.out_channels @property def stride(self): return 4 # = stride 2 conv -> stride 2 max pool class ResNetBlockBase(nn.Module): def __init__(self, in_channels, out_channels, stride): super().__init__() self.in_channels = in_channels self.out_channels = out_channels self.stride = stride def freeze(self): for p in self.parameters(): p.requires_grad = False return self class BottleneckBlock(ResNetBlockBase): def __init__( self, in_channels, out_channels, bottleneck_channels, stride=1, num_groups=1, norm="BN", stride_in_1x1=False, dilation=1, ): super().__init__(in_channels, out_channels, stride) if in_channels != out_channels: self.shortcut = Conv2d( in_channels, out_channels, kernel_size=1, stride=stride, bias=False, norm=get_norm(norm, out_channels), ) else: self.shortcut = None # The original MSRA ResNet models have stride in the first 1x1 conv # The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have # stride in the 3x3 conv stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 
else (1, stride) self.conv1 = Conv2d( in_channels, bottleneck_channels, kernel_size=1, stride=stride_1x1, bias=False, norm=get_norm(norm, bottleneck_channels), ) self.conv2 = Conv2d( bottleneck_channels, bottleneck_channels, kernel_size=3, stride=stride_3x3, padding=1 * dilation, bias=False, groups=num_groups, dilation=dilation, norm=get_norm(norm, bottleneck_channels), ) self.conv3 = Conv2d( bottleneck_channels, out_channels, kernel_size=1, bias=False, norm=get_norm(norm, out_channels), ) def forward(self, x): out = self.conv1(x) out = nn.functional.relu_(out) out = self.conv2(out) out = nn.functional.relu_(out) out = self.conv3(out) if self.shortcut is not None: shortcut = self.shortcut(x) else: shortcut = x out += shortcut out = nn.functional.relu_(out) return out class Backbone(nn.Module, metaclass=ABCMeta): def __init__(self): super().__init__() @abstractmethod def forward(self): pass @property def size_divisibility(self): """ Some backbones require the input height and width to be divisible by a specific integer. This is typically true for encoder / decoder type networks with lateral connection (e.g., FPN) for which feature maps need to match dimension in the "bottom up" and "top down" paths. Set to 0 if no specific input size divisibility is required. """ return 0 def output_shape(self): return { name: ShapeSpec( channels=self._out_feature_channels[name], stride=self._out_feature_strides[name], ) for name in self._out_features } @property def out_features(self): """deprecated""" return self._out_features @property def out_feature_strides(self): """deprecated""" return {f: self._out_feature_strides[f] for f in self._out_features} @property def out_feature_channels(self): """deprecated""" return {f: self._out_feature_channels[f] for f in self._out_features} class ResNet(Backbone): def __init__(self, stem, stages, num_classes=None, out_features=None): """ Args: stem (nn.Module): a stem module stages (list[list[ResNetBlock]]): several (typically 4) stages, each contains multiple :class:`ResNetBlockBase`. num_classes (None or int): if None, will not perform classification. out_features (list[str]): name of the layers whose outputs should be returned in forward. Can be anything in: "stem", "linear", or "res2" ... If None, will return the output of the last layer. """ super(ResNet, self).__init__() self.stem = stem self.num_classes = num_classes current_stride = self.stem.stride self._out_feature_strides = {"stem": current_stride} self._out_feature_channels = {"stem": self.stem.out_channels} self.stages_and_names = [] for i, blocks in enumerate(stages): for block in blocks: assert isinstance(block, ResNetBlockBase), block curr_channels = block.out_channels stage = nn.Sequential(*blocks) name = "res" + str(i + 2) self.add_module(name, stage) self.stages_and_names.append((stage, name)) self._out_feature_strides[name] = current_stride = int( current_stride * np.prod([k.stride for k in blocks]) ) self._out_feature_channels[name] = blocks[-1].out_channels if num_classes is not None: self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) self.linear = nn.Linear(curr_channels, num_classes) # Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour": # "The 1000-way fully-connected layer is initialized by # drawing weights from a zero-mean Gaussian with std of 0.01." 
nn.init.normal_(self.linear.weight, stddev=0.01) name = "linear" if out_features is None: out_features = [name] self._out_features = out_features assert len(self._out_features) children = [x[0] for x in self.named_children()] for out_feature in self._out_features: assert out_feature in children, "Available children: {}".format(", ".join(children)) def forward(self, x): outputs = {} x = self.stem(x) if "stem" in self._out_features: outputs["stem"] = x for stage, name in self.stages_and_names: x = stage(x) if name in self._out_features: outputs[name] = x if self.num_classes is not None: x = self.avgpool(x) x = self.linear(x) if "linear" in self._out_features: outputs["linear"] = x return outputs def output_shape(self): return { name: ShapeSpec( channels=self._out_feature_channels[name], stride=self._out_feature_strides[name], ) for name in self._out_features } @staticmethod def make_stage( block_class, num_blocks, first_stride=None, *, in_channels, out_channels, **kwargs, ): """ Usually, layers that produce the same feature map spatial size are defined as one "stage". Under such definition, stride_per_block[1:] should all be 1. """ if first_stride is not None: assert "stride" not in kwargs and "stride_per_block" not in kwargs kwargs["stride_per_block"] = [first_stride] + [1] * (num_blocks - 1) blocks = [] for i in range(num_blocks): curr_kwargs = {} for k, v in kwargs.items(): if k.endswith("_per_block"): assert ( len(v) == num_blocks ), f"Argument '{k}' of make_stage should have the same length as num_blocks={num_blocks}." newk = k[: -len("_per_block")] assert newk not in kwargs, f"Cannot call make_stage with both {k} and {newk}!" curr_kwargs[newk] = v[i] else: curr_kwargs[k] = v blocks.append(block_class(in_channels=in_channels, out_channels=out_channels, **curr_kwargs)) in_channels = out_channels return blocks class ROIPooler(nn.Module): """ Region of interest feature map pooler that supports pooling from one or more feature maps. """ def __init__( self, output_size, scales, sampling_ratio, canonical_box_size=224, canonical_level=4, ): super().__init__() # assumption that stride is a power of 2. 
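        # Worked example (comment only): with scales = (1/4, 1/8, 1/16, 1/32),
        # -log2 gives min_level = 2 and max_level = 5, i.e. one pooler per FPN level
        # p2..p5. The single-scale case used by the Res5 head in this file
        # (scales = (1/16,)) collapses to min_level = max_level = 4.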
min_level = -math.log2(scales[0]) max_level = -math.log2(scales[-1]) # a bunch of testing assert math.isclose(min_level, int(min_level)) and math.isclose(max_level, int(max_level)) assert len(scales) == max_level - min_level + 1, "not pyramid" assert 0 < min_level and min_level <= max_level if isinstance(output_size, int): output_size = (output_size, output_size) assert len(output_size) == 2 and isinstance(output_size[0], int) and isinstance(output_size[1], int) if len(scales) > 1: assert min_level <= canonical_level and canonical_level <= max_level assert canonical_box_size > 0 self.output_size = output_size self.min_level = int(min_level) self.max_level = int(max_level) self.level_poolers = nn.ModuleList(RoIPool(output_size, spatial_scale=scale) for scale in scales) self.canonical_level = canonical_level self.canonical_box_size = canonical_box_size def forward(self, feature_maps, boxes): """ Args: feature_maps: List[torch.Tensor(N,C,W,H)] box_lists: list[torch.Tensor]) Returns: A tensor of shape(N*B, Channels, output_size, output_size) """ x = list(feature_maps.values()) num_level_assignments = len(self.level_poolers) assert len(x) == num_level_assignments and len(boxes) == x[0].size(0) pooler_fmt_boxes = convert_boxes_to_pooler_format(boxes) if num_level_assignments == 1: return self.level_poolers[0](x[0], pooler_fmt_boxes) level_assignments = assign_boxes_to_levels( boxes, self.min_level, self.max_level, self.canonical_box_size, self.canonical_level, ) num_boxes = len(pooler_fmt_boxes) num_channels = x[0].shape[1] output_size = self.output_size[0] dtype, device = x[0].dtype, x[0].device output = torch.zeros( (num_boxes, num_channels, output_size, output_size), dtype=dtype, device=device, ) for level, (x_level, pooler) in enumerate(zip(x, self.level_poolers)): inds = torch.nonzero(level_assignments == level).squeeze(1) pooler_fmt_boxes_level = pooler_fmt_boxes[inds] output[inds] = pooler(x_level, pooler_fmt_boxes_level) return output class ROIOutputs(object): def __init__(self, cfg, training=False): self.smooth_l1_beta = cfg.ROI_BOX_HEAD.SMOOTH_L1_BETA self.box2box_transform = Box2BoxTransform(weights=cfg.ROI_BOX_HEAD.BBOX_REG_WEIGHTS) self.training = training self.score_thresh = cfg.ROI_HEADS.SCORE_THRESH_TEST self.min_detections = cfg.MIN_DETECTIONS self.max_detections = cfg.MAX_DETECTIONS nms_thresh = cfg.ROI_HEADS.NMS_THRESH_TEST if not isinstance(nms_thresh, list): nms_thresh = [nms_thresh] self.nms_thresh = nms_thresh def _predict_boxes(self, proposals, box_deltas, preds_per_image): num_pred = box_deltas.size(0) B = proposals[0].size(-1) K = box_deltas.size(-1) // B box_deltas = box_deltas.view(num_pred * K, B) proposals = torch.cat(proposals, dim=0).unsqueeze(-2).expand(num_pred, K, B) proposals = proposals.reshape(-1, B) boxes = self.box2box_transform.apply_deltas(box_deltas, proposals) return boxes.view(num_pred, K * B).split(preds_per_image, dim=0) def _predict_objs(self, obj_logits, preds_per_image): probs = nn.functional.softmax(obj_logits, dim=-1) probs = probs.split(preds_per_image, dim=0) return probs def _predict_attrs(self, attr_logits, preds_per_image): attr_logits = attr_logits[..., :-1].softmax(-1) attr_probs, attrs = attr_logits.max(-1) return attr_probs.split(preds_per_image, dim=0), attrs.split(preds_per_image, dim=0) @torch.no_grad() def inference( self, obj_logits, attr_logits, box_deltas, pred_boxes, features, sizes, scales=None, ): # only the pred boxes is the preds_per_image = [p.size(0) for p in pred_boxes] boxes_all = self._predict_boxes(pred_boxes, 
box_deltas, preds_per_image) obj_scores_all = self._predict_objs(obj_logits, preds_per_image) # list of length N attr_probs_all, attrs_all = self._predict_attrs(attr_logits, preds_per_image) features = features.split(preds_per_image, dim=0) # fun for each image too, also I can experiment and do multiple images final_results = [] zipped = zip(boxes_all, obj_scores_all, attr_probs_all, attrs_all, sizes) for i, (boxes, obj_scores, attr_probs, attrs, size) in enumerate(zipped): for nms_t in self.nms_thresh: outputs = do_nms( boxes, obj_scores, size, self.score_thresh, nms_t, self.min_detections, self.max_detections, ) if outputs is not None: max_boxes, max_scores, classes, ids = outputs break if scales is not None: scale_yx = scales[i] max_boxes[:, 0::2] *= scale_yx[1] max_boxes[:, 1::2] *= scale_yx[0] final_results.append( ( max_boxes, classes, max_scores, attrs[ids], attr_probs[ids], features[i][ids], ) ) boxes, classes, class_probs, attrs, attr_probs, roi_features = map(list, zip(*final_results)) return boxes, classes, class_probs, attrs, attr_probs, roi_features def training(self, obj_logits, attr_logits, box_deltas, pred_boxes, features, sizes): pass def __call__( self, obj_logits, attr_logits, box_deltas, pred_boxes, features, sizes, scales=None, ): if self.training: raise NotImplementedError() return self.inference( obj_logits, attr_logits, box_deltas, pred_boxes, features, sizes, scales=scales, ) class Res5ROIHeads(nn.Module): """ ROIHeads perform all per-region computation in an R-CNN. It contains logic of cropping the regions, extract per-region features (by the res-5 block in this case), and make per-region predictions. """ def __init__(self, cfg, input_shape): super().__init__() self.batch_size_per_image = cfg.RPN.BATCH_SIZE_PER_IMAGE self.positive_sample_fraction = cfg.ROI_HEADS.POSITIVE_FRACTION self.in_features = cfg.ROI_HEADS.IN_FEATURES self.num_classes = cfg.ROI_HEADS.NUM_CLASSES self.proposal_append_gt = cfg.ROI_HEADS.PROPOSAL_APPEND_GT self.feature_strides = {k: v.stride for k, v in input_shape.items()} self.feature_channels = {k: v.channels for k, v in input_shape.items()} self.cls_agnostic_bbox_reg = cfg.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG self.stage_channel_factor = 2**3 # res5 is 8x res2 self.out_channels = cfg.RESNETS.RES2_OUT_CHANNELS * self.stage_channel_factor # self.proposal_matcher = Matcher( # cfg.ROI_HEADS.IOU_THRESHOLDS, # cfg.ROI_HEADS.IOU_LABELS, # allow_low_quality_matches=False, # ) pooler_resolution = cfg.ROI_BOX_HEAD.POOLER_RESOLUTION pooler_scales = (1.0 / self.feature_strides[self.in_features[0]],) sampling_ratio = cfg.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO res5_halve = cfg.ROI_BOX_HEAD.RES5HALVE use_attr = cfg.ROI_BOX_HEAD.ATTR num_attrs = cfg.ROI_BOX_HEAD.NUM_ATTRS self.pooler = ROIPooler( output_size=pooler_resolution, scales=pooler_scales, sampling_ratio=sampling_ratio, ) self.res5 = self._build_res5_block(cfg) if not res5_halve: """ Modifications for VG in RoI heads: 1. Change the stride of conv1 and shortcut in Res5.Block1 from 2 to 1 2. 
Modifying all conv2 with (padding: 1 --> 2) and (dilation: 1 --> 2) """ self.res5[0].conv1.stride = (1, 1) self.res5[0].shortcut.stride = (1, 1) for i in range(3): self.res5[i].conv2.padding = (2, 2) self.res5[i].conv2.dilation = (2, 2) self.box_predictor = FastRCNNOutputLayers( self.out_channels, self.num_classes, self.cls_agnostic_bbox_reg, use_attr=use_attr, num_attrs=num_attrs, ) def _build_res5_block(self, cfg): stage_channel_factor = self.stage_channel_factor # res5 is 8x res2 num_groups = cfg.RESNETS.NUM_GROUPS width_per_group = cfg.RESNETS.WIDTH_PER_GROUP bottleneck_channels = num_groups * width_per_group * stage_channel_factor out_channels = self.out_channels stride_in_1x1 = cfg.RESNETS.STRIDE_IN_1X1 norm = cfg.RESNETS.NORM blocks = ResNet.make_stage( BottleneckBlock, 3, first_stride=2, in_channels=out_channels // 2, bottleneck_channels=bottleneck_channels, out_channels=out_channels, num_groups=num_groups, norm=norm, stride_in_1x1=stride_in_1x1, ) return nn.Sequential(*blocks) def _shared_roi_transform(self, features, boxes): x = self.pooler(features, boxes) return self.res5(x) def forward(self, features, proposal_boxes, gt_boxes=None): if self.training: """ see https://github.com/airsplay/py-bottom-up-attention/\ blob/master/detectron2/modeling/roi_heads/roi_heads.py """ raise NotImplementedError() assert not proposal_boxes[0].requires_grad box_features = self._shared_roi_transform(features, proposal_boxes) feature_pooled = box_features.mean(dim=[2, 3]) # pooled to 1x1 obj_logits, attr_logits, pred_proposal_deltas = self.box_predictor(feature_pooled) return obj_logits, attr_logits, pred_proposal_deltas, feature_pooled class AnchorGenerator(nn.Module): """ For a set of image sizes and feature maps, computes a set of anchors. """ def __init__(self, cfg, input_shape: List[ShapeSpec]): super().__init__() sizes = cfg.ANCHOR_GENERATOR.SIZES aspect_ratios = cfg.ANCHOR_GENERATOR.ASPECT_RATIOS self.strides = [x.stride for x in input_shape] self.offset = cfg.ANCHOR_GENERATOR.OFFSET assert 0.0 <= self.offset < 1.0, self.offset """ sizes (list[list[int]]): sizes[i] is the list of anchor sizes for feat map i 1. given in absolute lengths in units of the input image; 2. they do not dynamically scale if the input image size changes. aspect_ratios (list[list[float]]) strides (list[int]): stride of each input feature. """ self.num_features = len(self.strides) self.cell_anchors = nn.ParameterList(self._calculate_anchors(sizes, aspect_ratios)) self._spacial_feat_dim = 4 def _calculate_anchors(self, sizes, aspect_ratios): # If one size (or aspect ratio) is specified and there are multiple feature # maps, then we "broadcast" anchors of that single size (or aspect ratio) if len(sizes) == 1: sizes *= self.num_features if len(aspect_ratios) == 1: aspect_ratios *= self.num_features assert self.num_features == len(sizes) assert self.num_features == len(aspect_ratios) cell_anchors = [self.generate_cell_anchors(s, a).float() for s, a in zip(sizes, aspect_ratios)] return cell_anchors @property def box_dim(self): return self._spacial_feat_dim @property def num_cell_anchors(self): """ Returns: list[int]: Each int is the number of anchors at every pixel location, on that feature map. 
""" return [len(cell_anchors) for cell_anchors in self.cell_anchors] def grid_anchors(self, grid_sizes): anchors = [] for size, stride, base_anchors in zip(grid_sizes, self.strides, self.cell_anchors): shift_x, shift_y = _create_grid_offsets(size, stride, self.offset, base_anchors.device) shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1) anchors.append((shifts.view(-1, 1, 4) + base_anchors.view(1, -1, 4)).reshape(-1, 4)) return anchors def generate_cell_anchors(self, sizes=(32, 64, 128, 256, 512), aspect_ratios=(0.5, 1, 2)): """ anchors are continuous geometric rectangles centered on one feature map point sample. We can later build the set of anchors for the entire feature map by tiling these tensors """ anchors = [] for size in sizes: area = size**2.0 for aspect_ratio in aspect_ratios: w = math.sqrt(area / aspect_ratio) h = aspect_ratio * w x0, y0, x1, y1 = -w / 2.0, -h / 2.0, w / 2.0, h / 2.0 anchors.append([x0, y0, x1, y1]) return nn.Parameter(torch.tensor(anchors)) def forward(self, features): """ Args: features List[torch.Tensor]: list of feature maps on which to generate anchors. Returns: torch.Tensor: a list of #image elements. """ num_images = features[0].size(0) grid_sizes = [feature_map.shape[-2:] for feature_map in features] anchors_over_all_feature_maps = self.grid_anchors(grid_sizes) anchors_over_all_feature_maps = torch.stack(anchors_over_all_feature_maps) return anchors_over_all_feature_maps.unsqueeze(0).repeat_interleave(num_images, dim=0) class RPNHead(nn.Module): """ RPN classification and regression heads. Uses a 3x3 conv to produce a shared hidden state from which one 1x1 conv predicts objectness logits for each anchor and a second 1x1 conv predicts bounding-box deltas specifying how to deform each anchor into an object proposal. """ def __init__(self, cfg, input_shape: List[ShapeSpec]): super().__init__() # Standard RPN is shared across levels: in_channels = [s.channels for s in input_shape] assert len(set(in_channels)) == 1, "Each level must have the same channel!" 
in_channels = in_channels[0] anchor_generator = AnchorGenerator(cfg, input_shape) num_cell_anchors = anchor_generator.num_cell_anchors box_dim = anchor_generator.box_dim assert len(set(num_cell_anchors)) == 1, "Each level must have the same number of cell anchors" num_cell_anchors = num_cell_anchors[0] if cfg.PROPOSAL_GENERATOR.HIDDEN_CHANNELS == -1: hid_channels = in_channels else: hid_channels = cfg.PROPOSAL_GENERATOR.HIDDEN_CHANNELS # Modifications for VG in RPN (modeling/proposal_generator/rpn.py) # Use hidden dim instead fo the same dim as Res4 (in_channels) # 3x3 conv for the hidden representation self.conv = nn.Conv2d(in_channels, hid_channels, kernel_size=3, stride=1, padding=1) # 1x1 conv for predicting objectness logits self.objectness_logits = nn.Conv2d(hid_channels, num_cell_anchors, kernel_size=1, stride=1) # 1x1 conv for predicting box2box transform deltas self.anchor_deltas = nn.Conv2d(hid_channels, num_cell_anchors * box_dim, kernel_size=1, stride=1) for layer in [self.conv, self.objectness_logits, self.anchor_deltas]: nn.init.normal_(layer.weight, std=0.01) nn.init.constant_(layer.bias, 0) def forward(self, features): """ Args: features (list[Tensor]): list of feature maps """ pred_objectness_logits = [] pred_anchor_deltas = [] for x in features: t = nn.functional.relu(self.conv(x)) pred_objectness_logits.append(self.objectness_logits(t)) pred_anchor_deltas.append(self.anchor_deltas(t)) return pred_objectness_logits, pred_anchor_deltas class RPN(nn.Module): """ Region Proposal Network, introduced by the Faster R-CNN paper. """ def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]): super().__init__() self.min_box_side_len = cfg.PROPOSAL_GENERATOR.MIN_SIZE self.in_features = cfg.RPN.IN_FEATURES self.nms_thresh = cfg.RPN.NMS_THRESH self.batch_size_per_image = cfg.RPN.BATCH_SIZE_PER_IMAGE self.positive_fraction = cfg.RPN.POSITIVE_FRACTION self.smooth_l1_beta = cfg.RPN.SMOOTH_L1_BETA self.loss_weight = cfg.RPN.LOSS_WEIGHT self.pre_nms_topk = { True: cfg.RPN.PRE_NMS_TOPK_TRAIN, False: cfg.RPN.PRE_NMS_TOPK_TEST, } self.post_nms_topk = { True: cfg.RPN.POST_NMS_TOPK_TRAIN, False: cfg.RPN.POST_NMS_TOPK_TEST, } self.boundary_threshold = cfg.RPN.BOUNDARY_THRESH self.anchor_generator = AnchorGenerator(cfg, [input_shape[f] for f in self.in_features]) self.box2box_transform = Box2BoxTransform(weights=cfg.RPN.BBOX_REG_WEIGHTS) self.anchor_matcher = Matcher( cfg.RPN.IOU_THRESHOLDS, cfg.RPN.IOU_LABELS, allow_low_quality_matches=True, ) self.rpn_head = RPNHead(cfg, [input_shape[f] for f in self.in_features]) def training(self, images, image_shapes, features, gt_boxes): pass def inference(self, outputs, images, image_shapes, features, gt_boxes=None): outputs = find_top_rpn_proposals( outputs.predict_proposals(), outputs.predict_objectness_logits(), images, image_shapes, self.nms_thresh, self.pre_nms_topk[self.training], self.post_nms_topk[self.training], self.min_box_side_len, self.training, ) results = [] for img in outputs: im_boxes, img_box_logits = img img_box_logits, inds = img_box_logits.sort(descending=True) im_boxes = im_boxes[inds] results.append((im_boxes, img_box_logits)) (proposal_boxes, logits) = tuple(map(list, zip(*results))) return proposal_boxes, logits def forward(self, images, image_shapes, features, gt_boxes=None): """ Args: images (torch.Tensor): input images of length `N` features (dict[str: Tensor]) gt_instances """ # features is dict, key = block level, v = feature_map features = [features[f] for f in self.in_features] pred_objectness_logits, pred_anchor_deltas = 
self.rpn_head(features) anchors = self.anchor_generator(features) outputs = RPNOutputs( self.box2box_transform, self.anchor_matcher, self.batch_size_per_image, self.positive_fraction, images, pred_objectness_logits, pred_anchor_deltas, anchors, self.boundary_threshold, gt_boxes, self.smooth_l1_beta, ) # For RPN-only models, the proposals are the final output if self.training: raise NotImplementedError() return self.training(outputs, images, image_shapes, features, gt_boxes) else: return self.inference(outputs, images, image_shapes, features, gt_boxes) class FastRCNNOutputLayers(nn.Module): """ Two linear layers for predicting Fast R-CNN outputs: (1) proposal-to-detection box regression deltas (2) classification scores """ def __init__( self, input_size, num_classes, cls_agnostic_bbox_reg, box_dim=4, use_attr=False, num_attrs=-1, ): """ Args: input_size (int): channels, or (channels, height, width) num_classes (int) cls_agnostic_bbox_reg (bool) box_dim (int) """ super().__init__() if not isinstance(input_size, int): input_size = np.prod(input_size) # (do + 1 for background class) self.cls_score = nn.Linear(input_size, num_classes + 1) num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes self.bbox_pred = nn.Linear(input_size, num_bbox_reg_classes * box_dim) self.use_attr = use_attr if use_attr: """ Modifications for VG in RoI heads Embedding: {num_classes + 1} --> {input_size // 8} Linear: {input_size + input_size // 8} --> {input_size // 4} Linear: {input_size // 4} --> {num_attrs + 1} """ self.cls_embedding = nn.Embedding(num_classes + 1, input_size // 8) self.fc_attr = nn.Linear(input_size + input_size // 8, input_size // 4) self.attr_score = nn.Linear(input_size // 4, num_attrs + 1) nn.init.normal_(self.cls_score.weight, std=0.01) nn.init.normal_(self.bbox_pred.weight, std=0.001) for item in [self.cls_score, self.bbox_pred]: nn.init.constant_(item.bias, 0) def forward(self, roi_features): if roi_features.dim() > 2: roi_features = torch.flatten(roi_features, start_dim=1) scores = self.cls_score(roi_features) proposal_deltas = self.bbox_pred(roi_features) if self.use_attr: _, max_class = scores.max(-1) # [b, c] --> [b] cls_emb = self.cls_embedding(max_class) # [b] --> [b, 256] roi_features = torch.cat([roi_features, cls_emb], -1) # [b, 2048] + [b, 256] --> [b, 2304] roi_features = self.fc_attr(roi_features) roi_features = nn.functional.relu(roi_features) attr_scores = self.attr_score(roi_features) return scores, attr_scores, proposal_deltas else: return scores, proposal_deltas class GeneralizedRCNN(nn.Module): def __init__(self, cfg): super().__init__() self.device = torch.device(cfg.MODEL.DEVICE) self.backbone = build_backbone(cfg) self.proposal_generator = RPN(cfg, self.backbone.output_shape()) self.roi_heads = Res5ROIHeads(cfg, self.backbone.output_shape()) self.roi_outputs = ROIOutputs(cfg) self.to(self.device) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): config = kwargs.pop("config", None) state_dict = kwargs.pop("state_dict", None) cache_dir = kwargs.pop("cache_dir", None) from_tf = kwargs.pop("from_tf", False) force_download = kwargs.pop("force_download", False) resume_download = kwargs.pop("resume_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", False) use_cdn = kwargs.pop("use_cdn", True) # Load config if we don't provide a configuration if not isinstance(config, Config): config_path = config if config is not None else pretrained_model_name_or_path # try: config = 
Config.from_pretrained( config_path, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, ) # Load model if pretrained_model_name_or_path is not None: if os.path.isdir(pretrained_model_name_or_path): if os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)): # Load from a PyTorch checkpoint archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME) else: raise EnvironmentError( "Error no file named {} found in directory {} ".format( WEIGHTS_NAME, pretrained_model_name_or_path, ) ) elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path): archive_file = pretrained_model_name_or_path elif os.path.isfile(pretrained_model_name_or_path + ".index"): assert from_tf, "We found a TensorFlow checkpoint at {}, please set from_tf to True to load from this checkpoint".format( pretrained_model_name_or_path + ".index" ) archive_file = pretrained_model_name_or_path + ".index" else: archive_file = hf_bucket_url( pretrained_model_name_or_path, filename=WEIGHTS_NAME, use_cdn=use_cdn, ) try: # Load from URL or cache if already cached resolved_archive_file = cached_path( archive_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, ) if resolved_archive_file is None: raise EnvironmentError except EnvironmentError: msg = f"Can't load weights for '{pretrained_model_name_or_path}'." raise EnvironmentError(msg) if resolved_archive_file == archive_file: print("loading weights file {}".format(archive_file)) else: print("loading weights file {} from cache at {}".format(archive_file, resolved_archive_file)) else: resolved_archive_file = None # Instantiate model. model = cls(config) if state_dict is None: try: try: state_dict = torch.load(resolved_archive_file, map_location="cpu") except Exception: state_dict = load_checkpoint(resolved_archive_file) except Exception: raise OSError( "Unable to load weights from pytorch checkpoint file. " "If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True. 
" ) missing_keys = [] unexpected_keys = [] error_msgs = [] # Convert old format to new format if needed from a PyTorch state_dict old_keys = [] new_keys = [] for key in state_dict.keys(): new_key = None if "gamma" in key: new_key = key.replace("gamma", "weight") if "beta" in key: new_key = key.replace("beta", "bias") if new_key: old_keys.append(key) new_keys.append(new_key) for old_key, new_key in zip(old_keys, new_keys): state_dict[new_key] = state_dict.pop(old_key) # copy state_dict so _load_from_state_dict can modify it metadata = getattr(state_dict, "_metadata", None) state_dict = state_dict.copy() if metadata is not None: state_dict._metadata = metadata model_to_load = model model_to_load.load_state_dict(state_dict) if model.__class__.__name__ != model_to_load.__class__.__name__: base_model_state_dict = model_to_load.state_dict().keys() head_model_state_dict_without_base_prefix = [ key.split(cls.base_model_prefix + ".")[-1] for key in model.state_dict().keys() ] missing_keys.extend(head_model_state_dict_without_base_prefix - base_model_state_dict) if len(unexpected_keys) > 0: print( f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when" f" initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are" f" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or" " with another architecture (e.g. initializing a BertForSequenceClassification model from a" " BertForPreTraining model).\n- This IS NOT expected if you are initializing" f" {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly identical" " (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)." ) else: print(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n") if len(missing_keys) > 0: print( f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at" f" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably" " TRAIN this model on a down-stream task to be able to use it for predictions and inference." ) else: print( f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at" f" {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the checkpoint" f" was trained on, you can already use {model.__class__.__name__} for predictions without further" " training." 
) if len(error_msgs) > 0: raise RuntimeError( "Error(s) in loading state_dict for {}:\n\t{}".format( model.__class__.__name__, "\n\t".join(error_msgs) ) ) # Set model in evaluation mode to deactivate DropOut modules by default model.eval() return model def forward( self, images, image_shapes, gt_boxes=None, proposals=None, scales_yx=None, **kwargs, ): """ kwargs: max_detections (int), return_tensors {"np", "pt", None}, padding {None, "max_detections"}, pad_value (int), location = {"cuda", "cpu"} """ if self.training: raise NotImplementedError() return self.inference( images=images, image_shapes=image_shapes, gt_boxes=gt_boxes, proposals=proposals, scales_yx=scales_yx, **kwargs, ) @torch.no_grad() def inference( self, images, image_shapes, gt_boxes=None, proposals=None, scales_yx=None, **kwargs, ): # run images through backbone original_sizes = image_shapes * scales_yx features = self.backbone(images) # generate proposals if none are available if proposals is None: proposal_boxes, _ = self.proposal_generator(images, image_shapes, features, gt_boxes) else: assert proposals is not None # pool object features from either gt_boxes, or from proposals obj_logits, attr_logits, box_deltas, feature_pooled = self.roi_heads(features, proposal_boxes, gt_boxes) # prepare FRCNN Outputs and select top proposals boxes, classes, class_probs, attrs, attr_probs, roi_features = self.roi_outputs( obj_logits=obj_logits, attr_logits=attr_logits, box_deltas=box_deltas, pred_boxes=proposal_boxes, features=feature_pooled, sizes=image_shapes, scales=scales_yx, ) # will we pad??? subset_kwargs = { "max_detections": kwargs.get("max_detections", None), "return_tensors": kwargs.get("return_tensors", None), "pad_value": kwargs.get("pad_value", 0), "padding": kwargs.get("padding", None), } preds_per_image = torch.tensor([p.size(0) for p in boxes]) boxes = pad_list_tensors(boxes, preds_per_image, **subset_kwargs) classes = pad_list_tensors(classes, preds_per_image, **subset_kwargs) class_probs = pad_list_tensors(class_probs, preds_per_image, **subset_kwargs) attrs = pad_list_tensors(attrs, preds_per_image, **subset_kwargs) attr_probs = pad_list_tensors(attr_probs, preds_per_image, **subset_kwargs) roi_features = pad_list_tensors(roi_features, preds_per_image, **subset_kwargs) subset_kwargs["padding"] = None preds_per_image = pad_list_tensors(preds_per_image, None, **subset_kwargs) sizes = pad_list_tensors(image_shapes, None, **subset_kwargs) normalized_boxes = norm_box(boxes, original_sizes) return OrderedDict( { "obj_ids": classes, "obj_probs": class_probs, "attr_ids": attrs, "attr_probs": attr_probs, "boxes": boxes, "sizes": sizes, "preds_per_image": preds_per_image, "roi_features": roi_features, "normalized_boxes": normalized_boxes, } )
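# Usage sketch (comments only). The checkpoint name and the Preprocess helper are
# assumptions taken from the accompanying LXMERT demo, not defined in this file:
#
#   from processing_image import Preprocess
#   from utils import Config
#
#   cfg = Config.from_pretrained("unc-nlp/frcnn-vg-finetuned")
#   frcnn = GeneralizedRCNN.from_pretrained("unc-nlp/frcnn-vg-finetuned", config=cfg)
#   images, sizes, scales_yx = Preprocess(cfg)(["example.jpg"])
#   output_dict = frcnn(images, sizes, scales_yx=scales_yx, padding="max_detections",
#                       max_detections=cfg.max_detections, return_tensors="pt")
#   roi_features = output_dict["roi_features"]  # visual features consumed by LXMERT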
transformers/examples/research_projects/lxmert/modeling_frcnn.py/0
{ "file_path": "transformers/examples/research_projects/lxmert/modeling_frcnn.py", "repo_id": "transformers", "token_count": 34766 }
300
from .configuration_bert_masked import MaskedBertConfig from .modeling_bert_masked import ( MaskedBertForMultipleChoice, MaskedBertForQuestionAnswering, MaskedBertForSequenceClassification, MaskedBertForTokenClassification, MaskedBertModel, ) from .modules import *
transformers/examples/research_projects/movement-pruning/emmental/__init__.py/0
{ "file_path": "transformers/examples/research_projects/movement-pruning/emmental/__init__.py", "repo_id": "transformers", "token_count": 99 }
301
# coding=utf-8 # Copyright 2018 The Google Flax Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Callable, Dict, Tuple import flax.linen as nn import jax import jax.numpy as jnp import numpy as np from jax.random import PRNGKey from modeling_flax_performer_utils import make_fast_softmax_attention from transformers.file_utils import add_start_docstrings from transformers.modeling_flax_utils import ACT2FN from transformers.models.bert.configuration_bert import BertConfig from transformers.models.bert.modeling_flax_bert import FlaxBertOnlyMLMHead, FlaxBertPreTrainedModel from transformers.utils import logging logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "BertConfig" _TOKENIZER_FOR_DOC = "BertTokenizer" BERT_START_DOCSTRING = r""" This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ BERT_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`~transformers.BertTokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`): Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, 1]``: - 0 corresponds to a `sentence A` token, - 1 corresponds to a `sentence B` token. `What are token type IDs? <../glossary.html#token-type-ids>`_ position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. `What are position IDs? 
<../glossary.html#position-ids>`_ head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. """ class FlaxPerformerLayerNorm(nn.Module): """ Layer normalization (https://arxiv.org/abs/1607.06450). Operates on the last axis of the input data. """ epsilon: float = 1e-6 dtype: jnp.dtype = jnp.float32 # the dtype of the computation bias: bool = True # If True, bias (beta) is added. scale: bool = True # If True, multiply by scale (gamma). When the next layer is linear # (also e.g. nn.relu), this can be disabled since the scaling will be # done by the next layer. bias_init: jnp.ndarray = nn.initializers.zeros scale_init: jnp.ndarray = nn.initializers.ones @nn.compact def __call__(self, x): """ Applies layer normalization on the input. It normalizes the activations of the layer for each given example in a batch independently, rather than across a batch like Batch Normalization. i.e. applies a transformation that maintains the mean activation within each example close to 0 and the activation standard deviation close to 1 Args: x: the inputs Returns: Normalized inputs (the same shape as inputs). 
""" features = x.shape[-1] mean = jnp.mean(x, axis=-1, keepdims=True) mean2 = jnp.mean(jax.lax.square(x), axis=-1, keepdims=True) var = mean2 - jax.lax.square(mean) mul = jax.lax.rsqrt(var + self.epsilon) if self.scale: mul = mul * jnp.asarray(self.param("gamma", self.scale_init, (features,)), self.dtype) y = (x - mean) * mul if self.bias: y = y + jnp.asarray(self.param("beta", self.bias_init, (features,)), self.dtype) return y class FlaxPerformerEmbedding(nn.Module): """ Specify a new class for doing the embedding stuff as Flax's one use 'embedding' for the parameter name and PyTorch use 'weight' """ vocab_size: int hidden_size: int emb_init: Callable[..., np.ndarray] = nn.initializers.normal(stddev=0.1) @nn.compact def __call__(self, inputs): embedding = self.param("weight", self.emb_init, (self.vocab_size, self.hidden_size)) return jnp.take(embedding, inputs, axis=0) class FlaxPerformerEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" vocab_size: int hidden_size: int type_vocab_size: int max_length: int @nn.compact def __call__(self, input_ids, token_type_ids, position_ids, attention_mask): # Embed w_emb = FlaxPerformerEmbedding(self.vocab_size, self.hidden_size, name="word_embeddings")( jnp.atleast_2d(input_ids.astype("i4")) ) p_emb = FlaxPerformerEmbedding(self.max_length, self.hidden_size, name="position_embeddings")( jnp.atleast_2d(position_ids.astype("i4")) ) t_emb = FlaxPerformerEmbedding(self.type_vocab_size, self.hidden_size, name="token_type_embeddings")( jnp.atleast_2d(token_type_ids.astype("i4")) ) # Sum all embeddings summed_emb = w_emb + jnp.broadcast_to(p_emb, w_emb.shape) + t_emb # Layer Norm layer_norm = FlaxPerformerLayerNorm(name="layer_norm")(summed_emb) return layer_norm class FlaxPerformerAttention(nn.Module): num_heads: int head_size: int @nn.compact def __call__(self, hidden_state, attention_mask): single_head_dim = self.head_size // self.num_heads fast_softmax_attention = make_fast_softmax_attention(qkv_dim=single_head_dim) self_att = nn.attention.SelfAttention( num_heads=self.num_heads, qkv_features=self.head_size, name="self", attention_fn=fast_softmax_attention )(hidden_state, attention_mask) layer_norm = FlaxPerformerLayerNorm(name="layer_norm")(self_att + hidden_state) return layer_norm class FlaxPerformerIntermediate(nn.Module): output_size: int hidden_act: str = "gelu" @nn.compact def __call__(self, hidden_state): # TODO: Add ACT2FN reference to change activation function dense = nn.Dense(features=self.output_size, name="dense")(hidden_state) return ACT2FN[self.hidden_act](dense) class FlaxPerformerOutput(nn.Module): @nn.compact def __call__(self, intermediate_output, attention_output): hidden_state = nn.Dense(attention_output.shape[-1], name="dense")(intermediate_output) hidden_state = FlaxPerformerLayerNorm(name="layer_norm")(hidden_state + attention_output) return hidden_state class FlaxPerformerLayer(nn.Module): num_heads: int head_size: int intermediate_size: int hidden_act: str = "gelu" @nn.compact def __call__(self, hidden_state, attention_mask): attention = FlaxPerformerAttention(self.num_heads, self.head_size, name="attention")( hidden_state, attention_mask ) intermediate = FlaxPerformerIntermediate( self.intermediate_size, name="intermediate", hidden_act=self.hidden_act )(attention) output = FlaxPerformerOutput(name="output")(intermediate, attention) return output class FlaxPerformerLayerCollection(nn.Module): """ Stores N BertLayer(s) """ num_layers: int num_heads: int head_size: int 
intermediate_size: int hidden_act: str = "gelu" @nn.compact def __call__(self, inputs, attention_mask): assert self.num_layers > 0, f"num_layers should be >= 1, got ({self.num_layers})" # Initialize input / output input_i = inputs # Forward over all encoders for i in range(self.num_layers): layer = FlaxPerformerLayer( self.num_heads, self.head_size, self.intermediate_size, hidden_act=self.hidden_act, name=f"{i}" ) input_i = layer(input_i, attention_mask) return input_i class FlaxPerformerEncoder(nn.Module): num_layers: int num_heads: int head_size: int intermediate_size: int hidden_act: str = "gelu" @nn.compact def __call__(self, hidden_state, attention_mask): layer = FlaxPerformerLayerCollection( self.num_layers, self.num_heads, self.head_size, self.intermediate_size, name="layer", hidden_act=self.hidden_act, )(hidden_state, attention_mask) return layer class FlaxPerformerPooler(nn.Module): @nn.compact def __call__(self, hidden_state): cls_token = hidden_state[:, 0] out = nn.Dense(hidden_state.shape[-1], name="dense")(cls_token) return jax.lax.tanh(out) class FlaxPerformerModule(nn.Module): vocab_size: int hidden_size: int type_vocab_size: int max_length: int num_encoder_layers: int num_heads: int head_size: int intermediate_size: int hidden_act: str = "gelu" add_pooling_layer: bool = True @nn.compact def __call__(self, input_ids, token_type_ids, position_ids, attention_mask): # Embedding embeddings = FlaxPerformerEmbeddings( self.vocab_size, self.hidden_size, self.type_vocab_size, self.max_length, name="embeddings" )(input_ids, token_type_ids, position_ids, attention_mask) # N stacked encoding layers encoder = FlaxPerformerEncoder( self.num_encoder_layers, self.num_heads, self.head_size, self.intermediate_size, hidden_act=self.hidden_act, name="encoder", )(embeddings, attention_mask) if not self.add_pooling_layer: return encoder pooled = FlaxPerformerPooler(name="pooler")(encoder) return encoder, pooled @add_start_docstrings( "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.", BERT_START_DOCSTRING, ) class FlaxPerformerModel(FlaxBertPreTrainedModel): """ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in `Attention is all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. 
""" model_class = FlaxPerformerModule config_class = BertConfig base_model_prefix = "bert" @staticmethod def convert_from_pytorch(pt_state: Dict, config: BertConfig) -> Dict: jax_state = dict(pt_state) # Need to change some parameters name to match Flax names so that we don't have to fork any layer for key, tensor in pt_state.items(): # Key parts key_parts = set(key.split(".")) # Every dense layer has "kernel" parameters instead of "weight" if "dense.weight" in key: del jax_state[key] key = key.replace("weight", "kernel") jax_state[key] = tensor # SelfAttention needs also to replace "weight" by "kernel" if {"query", "key", "value"} & key_parts: # Flax SelfAttention decomposes the heads (num_head, size // num_heads) if "bias" in key: jax_state[key] = tensor.reshape((config.num_attention_heads, -1)) elif "weight": del jax_state[key] key = key.replace("weight", "kernel") tensor = tensor.reshape((config.num_attention_heads, -1, config.hidden_size)).transpose((2, 0, 1)) jax_state[key] = tensor # SelfAttention output is not a separate layer, remove one nesting if "attention.output.dense" in key: del jax_state[key] key = key.replace("attention.output.dense", "attention.self.out") jax_state[key] = tensor # SelfAttention output is not a separate layer, remove nesting on layer norm if "attention.output.LayerNorm" in key: del jax_state[key] key = key.replace("attention.output.LayerNorm", "attention.LayerNorm") jax_state[key] = tensor # There are some transposed parameters w.r.t their PyTorch counterpart if "intermediate.dense.kernel" in key or "output.dense.kernel" in key: jax_state[key] = tensor.T # Self Attention output projection needs to be transposed if "out.kernel" in key: jax_state[key] = tensor.reshape((config.hidden_size, config.num_attention_heads, -1)).transpose( 1, 2, 0 ) # Pooler needs to transpose its kernel if "pooler.dense.kernel" in key: jax_state[key] = tensor.T # Handle LayerNorm conversion if "LayerNorm" in key: del jax_state[key] # Replace LayerNorm by layer_norm new_key = key.replace("LayerNorm", "layer_norm") if "weight" in key: new_key = new_key.replace("weight", "gamma") elif "bias" in key: new_key = new_key.replace("bias", "beta") jax_state[new_key] = tensor return jax_state def __init__( self, config: BertConfig, input_shape: Tuple = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, **kwargs ): module = FlaxPerformerModule( vocab_size=config.vocab_size, hidden_size=config.hidden_size, type_vocab_size=config.type_vocab_size, max_length=config.max_position_embeddings, num_encoder_layers=config.num_hidden_layers, num_heads=config.num_attention_heads, head_size=config.hidden_size, intermediate_size=config.intermediate_size, dropout_rate=config.hidden_dropout_prob, hidden_act=config.hidden_act, ) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype) @property def module(self) -> nn.Module: return self._module def __call__( self, input_ids, token_type_ids=None, position_ids=None, dropout_rng: PRNGKey = None, attention_mask=None ): input_ids, attention_mask, token_type_ids, position_ids = self._check_inputs( input_ids, attention_mask, token_type_ids, position_ids ) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng return self.module.apply( {"params": self.params}, jnp.array(input_ids, dtype="i4"), jnp.array(token_type_ids, dtype="i4"), jnp.array(position_ids, dtype="i4"), jnp.array(attention_mask, dtype="i4"), rng=rngs, ) class FlaxPerformerForMaskedLM(FlaxBertPreTrainedModel): def __init__( self, 
config: BertConfig, input_shape: Tuple = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, **kwargs ): module = FlaxPerformerForMaskedLMModule( vocab_size=config.vocab_size, type_vocab_size=config.type_vocab_size, hidden_size=config.hidden_size, intermediate_size=config.intermediate_size, head_size=config.hidden_size, num_heads=config.num_attention_heads, num_encoder_layers=config.num_hidden_layers, max_length=config.max_position_embeddings, hidden_act=config.hidden_act, **kwargs, ) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype) def __call__( self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, params: dict = None, train: bool = False, dropout_rng: PRNGKey = None, ): input_ids, attention_mask, token_type_ids, position_ids = self._check_inputs( input_ids, attention_mask, token_type_ids, position_ids ) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng return self.module.apply( {"params": params or self.params}, jnp.array(input_ids, dtype="i4"), jnp.array(attention_mask, dtype="i4"), jnp.array(token_type_ids, dtype="i4"), jnp.array(position_ids, dtype="i4"), not train, rngs=rngs, ) class FlaxPerformerForMaskedLMModule(nn.Module): vocab_size: int hidden_size: int intermediate_size: int head_size: int num_heads: int num_encoder_layers: int type_vocab_size: int max_length: int hidden_act: str dropout_rate: float = 0.0 dtype: jnp.dtype = jnp.float32 @nn.compact def __call__( self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, deterministic: bool = True ): # Model encoder = FlaxPerformerModule( vocab_size=self.vocab_size, hidden_size=self.hidden_size, type_vocab_size=self.type_vocab_size, max_length=self.max_length, num_encoder_layers=self.num_encoder_layers, num_heads=self.num_heads, head_size=self.hidden_size, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, add_pooling_layer=False, name="bert", )(input_ids, attention_mask, token_type_ids, position_ids) # Compute the prediction scores encoder = nn.Dropout(rate=self.dropout_rate)(encoder, deterministic=deterministic) logits = FlaxBertOnlyMLMHead( vocab_size=self.vocab_size, hidden_act=self.hidden_act, name="cls", dtype=self.dtype )(encoder) return (logits,)
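# ---------------------------------------------------------------------------
# Illustrative smoke test (an addition for clarity, not part of the original
# research project): it exercises the FlaxPerformerLayerNorm module defined
# above through Flax's standard init/apply pattern. The input shape and seed
# are arbitrary assumptions.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    rng = PRNGKey(0)
    dummy_inputs = jax.random.normal(rng, (2, 8))
    layer_norm = FlaxPerformerLayerNorm()
    variables = layer_norm.init(rng, dummy_inputs)  # creates the "gamma" and "beta" parameters
    normalized = layer_norm.apply(variables, dummy_inputs)
    # Each example is normalized over the feature (last) axis.
    print(normalized.shape)  # (2, 8)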
transformers/examples/research_projects/performer/modeling_flax_performer.py/0
{ "file_path": "transformers/examples/research_projects/performer/modeling_flax_performer.py", "repo_id": "transformers", "token_count": 9147 }
302
# Add parent directory to python path to access lightning_base.py export PYTHONPATH="../":"${PYTHONPATH}" # A sample finetuning run, you need to specify data_dir, output_dir and model_name_or_path # run ./examples/rag/finetune_rag.sh --help to see all the possible options python examples/rag/finetune_rag.py \ --data_dir $DATA_DIR \ --output_dir $OUTPUT_DIR \ --model_name_or_path $MODEL_NAME_OR_PATH \ --model_type rag_sequence \ --fp16 \ --gpus 8 \ --profile \ --do_train \ --do_predict \ --n_val -1 \ --train_batch_size 8 \ --eval_batch_size 1 \ --max_source_length 128 \ --max_target_length 25 \ --val_max_target_length 25 \ --test_max_target_length 25 \ --label_smoothing 0.1 \ --dropout 0.1 \ --attention_dropout 0.1 \ --weight_decay 0.001 \ --adam_epsilon 1e-08 \ --max_grad_norm 0.1 \ --lr_scheduler polynomial \ --learning_rate 3e-05 \ --num_train_epochs 100 \ --warmup_steps 500 \ --gradient_accumulation_steps 1 \
transformers/examples/research_projects/rag/finetune_rag.sh/0
{ "file_path": "transformers/examples/research_projects/rag/finetune_rag.sh", "repo_id": "transformers", "token_count": 440 }
303
# Copyright 2022 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #!/bin/bash # Create a virtual environment conda deactivate conda update conda -y conda update anaconda -y pip install --upgrade pip python3 -m pip install --user virtualenv conda create -n strata python=3.9 -y conda activate strata # Install all necessary packages pip install transformers pip install -r requirements.txt # Download and prepare data WORK_DIR="/tmp/strata" rm -rf "${WORK_DIR}" && mkdir -p "${WORK_DIR}" wget https://storage.googleapis.com/gresearch/strata/demo.zip -P "${WORK_DIR}" DEMO_ZIP_FILE="${WORK_DIR}/demo.zip" unzip "${DEMO_ZIP_FILE}" -d "${WORK_DIR}" && rm "${DEMO_ZIP_FILE}" DATA_DIR="${WORK_DIR}/demo/scitail-8" OUTPUT_DIR="/tmp/output" rm -rf "${OUTPUT_DIR}" && mkdir -p "${OUTPUT_DIR}" # Specific hyperparameters MODEL_NAME_OR_PATH="bert-base-uncased" NUM_NODES=1 NUM_TRAINERS=4 LAUNCH_SCRIPT="torchrun --nnodes='${NUM_NODES}' --nproc_per_node='${NUM_TRAINERS}' python -c" MAX_SELFTRAIN_ITERATIONS=100 TRAIN_FILE="train.csv" INFER_FILE="infer.csv" EVAL_FILE="eval_256.csv" MAX_STEPS=100000 # Start self-training ${LAUNCH_SCRIPT} " import os from selftraining import selftrain data_dir = '${DATA_DIR}' parameters_dict = { 'max_selftrain_iterations': ${MAX_SELFTRAIN_ITERATIONS}, 'model_name_or_path': '${MODEL_NAME_OR_PATH}', 'output_dir': '${OUTPUT_DIR}', 'train_file': os.path.join(data_dir, '${TRAIN_FILE}'), 'infer_file': os.path.join(data_dir, '${INFER_FILE}'), 'eval_file': os.path.join(data_dir, '${EVAL_FILE}'), 'evaluation_strategy': 'steps', 'task_name': 'scitail', 'label_list': ['entails', 'neutral'], 'per_device_train_batch_size': 32, 'per_device_eval_batch_size': 8, 'max_length': 128, 'learning_rate': 2e-5, 'max_steps': ${MAX_STEPS}, 'eval_steps': 1, 'early_stopping_patience': 50, 'overwrite_output_dir': True, 'do_filter_by_confidence': False, 'do_filter_by_val_performance': True, 'finetune_on_labeled_data': False, 'seed': 42, } selftrain(**parameters_dict) "
transformers/examples/research_projects/self-training-text-classification/run.sh/0
{ "file_path": "transformers/examples/research_projects/self-training-text-classification/run.sh", "repo_id": "transformers", "token_count": 961 }
304
#!/usr/bin/env bash export PYTHONPATH="../":"${PYTHONPATH}" # From appendix C of paper https://arxiv.org/abs/1912.08777 # Set --gradient_accumulation_steps so that effective batch size is 256 (2*128, 4*64, 8*32, 16*16) python finetune.py \ --learning_rate=1e-4 \ --do_train \ --do_predict \ --n_val 1000 \ --val_check_interval 0.25 \ --max_source_length 512 --max_target_length 56 \ --freeze_embeds --label_smoothing 0.1 --adafactor --task summarization_xsum \ "$@"
transformers/examples/research_projects/seq2seq-distillation/finetune_pegasus_xsum.sh/0
{ "file_path": "transformers/examples/research_projects/seq2seq-distillation/finetune_pegasus_xsum.sh", "repo_id": "transformers", "token_count": 208 }
305
# Fine-Tuning week of XLSR-Wav2Vec2 on 60 languages 🌍 Welcome to the fine-tuning week! The goal of this week is to have state-of-the-art automatic speech recognition (ASR) models in as many languages as possible. The fine-tuning week ends on Friday, the 26th March at midnight PST time. Participants are encouraged to fine-tune the pretrained [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) checkpoint on one or more of the 60 languages of [Common Voice dataset](https://commonvoice.mozilla.org/en/datasets). Furthermore, it is very much appreciated if participants fine-tune XLSR-Wav2Vec2 on a language that is not included in the Common Voice dataset. All fine-tuned models uploaded until Friday, the 26th March midnight PST, will be taken into account for competition, and the best model per language will be awarded a prize if the best model performs reasonably well. The testing data to evaluate the models will be the official [Common Voice dataset](https://commonvoice.mozilla.org/en/datasets) *`test data`* of version 6.1. Again, participants are very much encouraged to fine-tune XLSR-Wav2Vec2 on languages that are not found in the Common Voice dataset since those languages are even more likely to be underrepresented in the speech community. Each model fine-tuned on a language not found in Common Voice, will be evaluated by the Hugging Face team after Friday, the 26th March at midnight PST, and if the model performs reasonably well, the model receives a prize as well. For more information on which data can be used for training, how the models are evaluated exactly, and what type of data preprocessing can be used, please see ["Training and Evaluation Rules"](#training-and-evaluation-rules). **Please keep in mind:** The spirit of the fine-tuning week is to provide state-of-the-art speech recognition in as many languages as possible to the community! So while we encourage healthy competition between people/groups of the same language so that better results are obtained, it is extremely important that we help each other and share our insights with the whole team/community. What matters in the end is what has been achieved by the team as a whole during the fine-tuning week. That being said, we strongly encourage people to share tips & tricks on the forum or Slack, help each other when team members encounter bugs, and work in groups. To make it easier to share and help, forum threads have been created under the name {language} ASR: Fine-Tuning Wav2Vec2, e.g. here. It is very much possible that prizes will be given to groups of people instead of individuals. 
Also, don't hesitate to ask questions, propose improvements to the organization, to the material given to participants, etc...🤗

## Table of Contents

- [Organization of the fine tuning week](#organization-of-the-fine-tuning-week)
- [How to fine tune XLSR Wav2Vec2](#how-to-fine-tune-xlsr-wav2vec2)
- [Google colab setup](#google-colab-setup)
- [Local machine](#local-machine)
- [How to upload my trained checkpoint](#how-to-upload-my-trained-checkpoint)
- [How to create the README](#how-to-create-the-readme)
- [How to evaluate my trained checkpoint](#how-to-evaluate-my-trained-checkpoint)
- [Rules of training and evaluation](#rules-of-training-and-evaluation)
- [Tips and tricks](#tips-and-tricks)
- [How to combine multiple datasets into one](#how-to-combine-multiple-datasets-into-one)
- [How to effectively preprocess the data](#how-to-effectively-preprocess-the-data)
- [How to efficiently load datasets with limited RAM and hard drive space](#how-to-efficiently-load-datasets-with-limited-ram-and-hard-drive-space)
- [How to do hyperparameter tuning](#how-to-do-hyperparameter-tuning)
- [How to preprocess and evaluate character based languages](#how-to-preprocess-and-evaluate-character-based-languages)
- [Further reading material](#further-reading-material)
- [FAQ](#faq)

## Organization of the fine tuning week

The week officially starts on 22.03.2021 and ends on 29.03.2021, but you are more than welcome to start fine-tuning models before the start date. General questions you might have, general problems you encounter, and general tips can be shared directly on the Slack channel (see [this post](https://discuss.huggingface.co/t/open-to-the-community-xlsr-wav2vec2-fine-tuning-week-for-low-resource-languages/4467) on how to be added to Slack). More language-specific questions or specific bugs should be posted on the [forum](https://discuss.huggingface.co/) (feel free to use already existing language-specific threads, *e.g.* [this one](https://discuss.huggingface.co/t/arabic-asr-fine-tuning-wav2vec2/4608), or open a new one if there is no thread for your language yet) or directly on [github](https://github.com/huggingface/transformers) if you think some code or document needs correction/improvement.

Starting on Monday, the 22.03.2021, the Hugging Face team will try to provide an overview of currently trained models along with their evaluation results. All the necessary information on:

- How to fine-tune the XLSR model
- How to upload the model
- How to share your evaluation results & training/eval script
- What the training/evaluation rules are

can be found in the sections below. If something is still unclear, feel free to drop a message in the Slack channel.

## How to fine tune XLSR Wav2Vec2

This chapter gives an in-depth explanation of how to fine-tune [Facebook's multi-lingual Wav2vec2](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on any language of the [Common Voice dataset](https://commonvoice.mozilla.org/en/datasets).

Two possible setups can be used to fine-tune Wav2Vec2. The easiest setup is to simply use [google colab](https://colab.research.google.com/). It is possible to train the full model in a *free* google colab, but it is recommended to use google colab pro since it is more stable.

The other option is to run a script locally. While this can be more difficult to set up, it also means that you have more control over the training run and probably access to better GPUs than you would have in a google colab. For small datasets, it is usually totally sufficient to train your model in a google colab.
For larger and thus more memory-intensive datasets, it is probably better to fine-tune the model locally.

For each option, we explain in detail how to fine-tune XLSR-Wav2Vec2 in the following.

### Google colab setup

**Note**: Instead of reading the following section, you can simply watch [this](https://www.youtube.com/watch?v=UynYn2C3tI0&ab_channel=PatrickvonPlaten) video, where Patrick explains how to adapt the google colab for your specific language.

**1.**: If you plan on training XLSR-Wav2Vec2 in a google colab, you should first make sure to have a valid gmail account. You can sign up for a gmail account [here](https://accounts.google.com/signup/v2/webcreateaccount?hl=en&flowName=GlifWebSignIn&flowEntry=SignUp). Having successfully signed up for gmail, you can now sign in to your account to make sure you are logged in when opening new tabs in your browser.

**2.**: Next, head over to the official [Fine-Tune XLSR-Wav2Vec2 with 🤗 Transformers](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/Fine_Tune_XLSR_Wav2Vec2_on_Turkish_ASR_with_%F0%9F%A4%97_Transformers.ipynb) google colab. The first thing you should do is to make a copy of it - click `->File->Save a copy in Drive`. This should save a copy of the google colab in your google drive.

**3.**: Now it is highly recommended to carefully read the google colab without running the cells yet. You should get an understanding of how the model is trained and what you will have to change when training the model in a different language. Having done so, you can again head over to [Common Voice](https://commonvoice.mozilla.org/en/datasets) and pick a language you want to fine-tune [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on. Make sure you remember the language code (for each language, you can find it under the field "*Version*"; it corresponds to **all characters before the first underscore**, *e.g.* for Greek it is *el*, while for Irish it is *ga-IE*).

**4.**: Now you should replace the language code used for the demo of this colab, being *tr* for Turkish, with the language code corresponding to the language you just chose in the **second** cell of the google colab. This will load the correct data for your language.

**5.**: It is time to start running the google colab! Make sure that you have selected "GPU" as your runtime environment and you can start running the cells one-by-one. Make sure you attentively read the text between the cells to understand what is happening and, if necessary, correct the cells to improve the fine-tuning script for your language. Things you might want to improve/change:

- Data loading. It is very much recommended to use more than just the official training data of the Common Voice dataset. If you find more data on the internet, feel free to use it! Check out the section ["How to combine multiple datasets into one"](#how-to-combine-multiple-datasets-into-one).
- Data Processing. You should adapt the data processing to your specific language. In data processing, you should make the data more uniform so that it will be easier for the model to learn how to classify speech in your data. Here it can be really helpful to be proficient in the language to know what can be done to simplify the language without changing the meaning. Data processing methods include, but are not limited to:
  - Normalizing your data. Make sure all characters are lower-cased.
  - Remove typographical symbols and punctuation marks.
    See a list [here](https://en.wikipedia.org/wiki/List_of_typographical_symbols_and_punctuation_marks). Be careful to not remove punctuation marks that can change the meaning of the sentence. *E.g.* you should not remove the single quotation mark `'` in English, as it would change the words `"it's"` to `"its"`, which is a different word and thus has a different meaning. For more tips on data processing see ["How to effectively preprocess the data"](#how-to-effectively-preprocess-the-data).
- Hyperparameter Tuning. Depending on the size of the data, you should probably change the hyperparameters of the google colab. You can change any parameter you like. For more tips and tricks see ["How to do hyperparameter tuning"](#how-to-do-hyperparameter-tuning).

When running the google colab make sure that you uncomment the cell corresponding to mounting your google drive to the colab. This cell looks as follows:

```python
# from google.colab import drive
# drive.mount('/content/gdrive/')
```

Uncomment it, run it, and follow the instructions to mount your google drive. This way you can be sure that the model parameters and created tokenizer & feature extractor files are saved in **your** google drive.

Also, make sure that you uncomment the cells corresponding to saving the preprocessing files and trained model weights to your drive. Otherwise, you might lose a trained model if your google colab crashes. You should change the name of your model from `wav2vec2-large-xlsr-turkish-demo` to `wav2vec2-large-xlsr-{your_favorite_name}`. Those cells correspond to:

```python
# processor.save_pretrained("/content/gdrive/MyDrive/wav2vec2-large-xlsr-turkish-demo")
```

and the line:

```python
output_dir="/content/gdrive/MyDrive/wav2vec2-large-xlsr-turkish-demo",
```

further below (which should already be uncommented).

Having finished the training you should find the following files/folders under the folder `wav2vec2-large-xlsr-{your_favorite_name}` in your google drive:

- `preprocessor_config.json` - the parameters of the feature extractor
- `special_tokens_map.json` - the special token map of the tokenizer
- `tokenizer_config.json` - the parameters of the tokenizer
- `vocab.json` - the vocabulary of the tokenizer
- `checkpoint-{...}/` - the checkpoints saved during training. Each checkpoint should contain the files: `config.json`, `optimizer.pt`, `pytorch_model.bin`, `scheduler.pt`, `training_args.bin`.

The files `config.json` and `pytorch_model.bin` define your model.

If you are happy with your training results it is time to upload your model! Download the following files to your local computer: **`preprocessor_config.json`, `special_tokens_map.json`, `tokenizer_config.json`, `vocab.json`, `config.json`, `pytorch_model.bin`**. Those files fully define a XLSR-Wav2Vec2 model checkpoint.

Awesome, you have successfully trained a XLSR-Wav2Vec2 model 😎. Now you can jump to the section ["How to upload my trained checkpoint"](#how-to-upload-my-trained-checkpoint).

### Local machine

We have provided the `run_common_voice.py` script to run fine-tuning on a local machine. The script is similar to the colab but allows you to launch training from the command line, save and continue training from previous checkpoints, and launch training on multiple GPUs. For bigger datasets, we recommend training Wav2Vec2 locally instead of in a google colab.

1. To begin with, we should clone transformers locally and install all the required packages.
First, you need to clone the `transformers` repo with:

```bash
$ git clone https://github.com/huggingface/transformers.git
```

Second, head over to the `examples/research_projects/wav2vec2` directory, where the `run_common_voice.py` script is located.

```bash
$ cd transformers/examples/research_projects/wav2vec2
```

Third, install the required packages. The packages are listed in the `requirements.txt` file and can be installed with

```bash
$ pip install -r requirements.txt
```

**Note**: Installing the latest version of `torchaudio` will also upgrade `torch` to its latest stable version. If you are using a specific version of `torch`, then make sure to use the correct `torchaudio` version compatible with your version of `torch`. By default the `requirements.txt` will install the latest version of `torchaudio`.

2. Next, take a look at the `run_common_voice.py` script to get an understanding of how it works. In short the script does the following:

- Load the given common voice dataset
- Create vocab for the language
- Load the model with given hyperparameters
- Pre-process the dataset to input into the model
- Run training
- Run evaluation

3. The following examples show how you can launch fine-tuning for the common voice dataset. Here we will run the script on the *Turkish* Common Voice dataset for demonstration purposes.

**To launch fine-tuning on a single GPU:**

```bash
python run_common_voice.py \
    --model_name_or_path="facebook/wav2vec2-large-xlsr-53" \
    --dataset_config_name="tr" \ # use this argument to specify the language code
    --output_dir=./wav2vec2-large-xlsr-turkish-demo \
    --overwrite_output_dir \
    --num_train_epochs="5" \
    --per_device_train_batch_size="16" \
    --learning_rate="3e-4" \
    --warmup_steps="500" \
    --evaluation_strategy="steps" \
    --save_steps="400" \
    --eval_steps="400" \
    --logging_steps="400" \
    --save_total_limit="3" \
    --freeze_feature_extractor \
    --feat_proj_dropout="0.0" \
    --layerdrop="0.1" \
    --gradient_checkpointing \
    --fp16 \
    --group_by_length \
    --do_train --do_eval
```

**To launch fine-tuning on multiple GPUs:**

```bash
python -m torch.distributed.launch \
    --nproc_per_node 4 run_common_voice.py \
    --model_name_or_path="facebook/wav2vec2-large-xlsr-53" \
    --dataset_config_name="tr" \ # use this argument to specify the language code
    --output_dir=./wav2vec2-large-xlsr-turkish-demo \
    --overwrite_output_dir \
    --num_train_epochs="5" \
    --per_device_train_batch_size="16" \
    --learning_rate="3e-4" \
    --warmup_steps="500" \
    --evaluation_strategy="steps" \
    --save_steps="400" \
    --eval_steps="400" \
    --logging_steps="400" \
    --save_total_limit="3" \
    --freeze_feature_extractor \
    --feat_proj_dropout="0.0" \
    --layerdrop="0.1" \
    --gradient_checkpointing \
    --fp16 \
    --group_by_length \
    --do_train --do_eval
```

The above command will launch the training on 4 GPUs. Use the `--nproc_per_node` option to specify the number of GPUs.

Once the training is finished, the model and checkpoints will be saved under the directory specified by the `--output_dir` argument.

4. The script also allows you to resume training from the last saved checkpoint. To resume training from the last saved checkpoint, remove the `--overwrite_output_dir` option and run the same command again. And to continue training from a specific checkpoint, keep the `--overwrite_output_dir` option and pass the path of the checkpoint as `--model_name_or_path`.
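If you are unsure which checkpoint is the most recent one, a small helper such as the following can locate it for you. This is only an illustrative sketch and is **not** part of `run_common_voice.py`; the directory name is simply the example `--output_dir` used above.

```python
import os
import re


def find_last_checkpoint(output_dir):
    """Return the path of the highest-numbered `checkpoint-*` folder, or None."""
    checkpoints = [d for d in os.listdir(output_dir) if re.fullmatch(r"checkpoint-\d+", d)]
    if not checkpoints:
        return None
    latest = max(checkpoints, key=lambda name: int(name.split("-")[1]))
    return os.path.join(output_dir, latest)


print(find_last_checkpoint("./wav2vec2-large-xlsr-turkish-demo"))
```

The printed path is what you would pass as `--model_name_or_path` when continuing from a specific checkpoint.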
As the script is based on the `Trainer` API, refer to the [Trainer docs](https://huggingface.co/transformers/main_classes/trainer.html) for more information about ``Trainer`` and ``TrainingArguments``. [OVH cloud](https://www.ovh.com/world/) has generously offered free compute for this sprint. Please refer to [this video](https://www.youtube.com/watch?v=2hlkWAESMk8&ab_channel=Databuzzword) to get started with OVH. ## How to upload my trained checkpoint To upload your trained checkpoint, you have to create a new model repository on the 🤗 model hub, from this page: https://huggingface.co/new > You can also follow the more in-depth instructions [here](https://huggingface.co/transformers/model_sharing.html) if needed. Having created your model repository on the hub, you should clone it locally: ```bash git lfs install git clone https://huggingface.co/username/your-model-name ``` Then and add the following files that fully define a XLSR-Wav2Vec2 checkpoint into the repository. You should have added the following files. - `preprocessor_config.json` - `special_tokens_map.json` - `tokenizer_config.json` - `vocab.json` - `config.json` - `pytorch_model.bin` Having added the above files, you should run the following to push files to your model repository. ```bash git add . && git commit -m "Add model files" && git push ``` The next **very important** step is to create the model card. For people to use your fine-tuned model it is important to understand: - What kind of model is it? - What is your model useful for? - What data was your model trained on? - How well does your model perform? All these questions should be answered in a model card which is the first thing people see when visiting your model on the hub under `https://huggingface.co/{your_username}/{your_modelname}`. **Note**: It is extremely important that you add this model card or else we cannot find your model and thus cannot take the model into account for the final evaluation. ### How to create the readme The model card is written in markdown (`.md`) and should be added by simply clicking on the "Add model card" button which is found on the top right corner. You are encouraged to copy-paste the following template into your model card. **Make sure that** instead of copying the output of the markdown file you copy the **raw** version of the following part. To get the raw version of this file, simply click on the "`raw`" button on the top right corner of this file next to "`blame`" and copy everything below the marker. Make sure that you read and consequently remove all #TODO: statements from the model card. <======================Copy **raw** version from here========================= --- language: {lang_id} #TODO: replace {lang_id} in your language code here. Make sure the code is one of the *ISO codes* of [this](https://huggingface.co/languages) site. datasets: - common_voice #TODO: remove if you did not use the common voice dataset - TODO: add more datasets if you have used additional datasets. Make sure to use the exact same dataset name as the one found [here](https://huggingface.co/datasets). If the dataset can not be found in the official datasets, just give it a new name metrics: - wer tags: - audio - automatic-speech-recognition - speech - xlsr-fine-tuning-week license: apache-2.0 model-index: - name: {human_readable_name} #TODO: replace {human_readable_name} with a name of your model as it should appear on the leaderboard. 
It could be something like `Elgeish XLSR Wav2Vec2 Large 53` results: - task: name: Speech Recognition type: automatic-speech-recognition dataset: name: Common Voice {lang_id} #TODO: replace {lang_id} in your language code here. Make sure the code is one of the *ISO codes* of [this](https://huggingface.co/languages) site. type: common_voice args: {lang_id} #TODO: replace {lang_id} in your language code here. Make sure the code is one of the *ISO codes* of [this](https://huggingface.co/languages) site. metrics: - name: Test WER type: wer value: {wer_result_on_test} #TODO (IMPORTANT): replace {wer_result_on_test} with the WER error rate you achieved on the common_voice test set. It should be in the format XX.XX (don't add the % sign here). **Please** remember to fill out this value after you evaluated your model, so that your model appears on the leaderboard. If you fill out this model card before evaluating your model, please remember to edit the model card afterward to fill in your value --- # Wav2Vec2-Large-XLSR-53-{language} #TODO: replace language with your {language}, *e.g.* French Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on {language} using the [Common Voice](https://huggingface.co/datasets/common_voice), ... and ... dataset{s}. #TODO: replace {language} with your language, *e.g.* French and eventually add more datasets that were used and eventually remove common voice if model was not trained on common voice When using this model, make sure that your speech input is sampled at 16kHz. ## Usage The model can be used directly (without a language model) as follows: ```python import torch import torchaudio from datasets import load_dataset from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor test_dataset = load_dataset("common_voice", "{lang_id}", split="test[:2%]") #TODO: replace {lang_id} in your language code here. Make sure the code is one of the *ISO codes* of [this](https://huggingface.co/languages) site. processor = Wav2Vec2Processor.from_pretrained("{model_id}") #TODO: replace {model_id} with your model id. The model id consists of {your_username}/{your_modelname}, *e.g.* `elgeish/wav2vec2-large-xlsr-53-arabic` model = Wav2Vec2ForCTC.from_pretrained("{model_id}") #TODO: replace {model_id} with your model id. The model id consists of {your_username}/{your_modelname}, *e.g.* `elgeish/wav2vec2-large-xlsr-53-arabic` resampler = torchaudio.transforms.Resample(48_000, 16_000) # Preprocessing the datasets. # We need to read the aduio files as arrays def speech_file_to_array_fn(batch): speech_array, sampling_rate = torchaudio.load(batch["path"]) batch["speech"] = resampler(speech_array).squeeze().numpy() return batch test_dataset = test_dataset.map(speech_file_to_array_fn) inputs = processor(test_dataset[:2]["speech"], sampling_rate=16_000, return_tensors="pt", padding=True) with torch.no_grad(): logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits predicted_ids = torch.argmax(logits, dim=-1) print("Prediction:", processor.batch_decode(predicted_ids)) print("Reference:", test_dataset[:2]["sentence"]) ``` ## Evaluation The model can be evaluated as follows on the {language} test data of Common Voice. 
#TODO: replace {language} with your language, *e.g.* French

```python
import torch
import torchaudio
from datasets import load_dataset, load_metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import re

test_dataset = load_dataset("common_voice", "{lang_id}", split="test") #TODO: replace {lang_id} in your language code here. Make sure the code is one of the *ISO codes* of [this](https://huggingface.co/languages) site.
wer = load_metric("wer")

processor = Wav2Vec2Processor.from_pretrained("{model_id}") #TODO: replace {model_id} with your model id. The model id consists of {your_username}/{your_modelname}, *e.g.* `elgeish/wav2vec2-large-xlsr-53-arabic`
model = Wav2Vec2ForCTC.from_pretrained("{model_id}") #TODO: replace {model_id} with your model id. The model id consists of {your_username}/{your_modelname}, *e.g.* `elgeish/wav2vec2-large-xlsr-53-arabic`
model.to("cuda")

chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“]'  # TODO: adapt this list to include all special characters you removed from the data
resampler = torchaudio.transforms.Resample(48_000, 16_000)

# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
    batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)

# Preprocessing the datasets.
# We need to read the audio files as arrays
def evaluate(batch):
    inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)

    with torch.no_grad():
        logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits

    pred_ids = torch.argmax(logits, dim=-1)
    batch["pred_strings"] = processor.batch_decode(pred_ids)
    return batch

result = test_dataset.map(evaluate, batched=True, batch_size=8)

print("WER: {:.2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
```

**Test Result**: XX.XX %  # TODO: write the output of the print statement here. IMPORTANT: Please remember to also replace {wer_result_on_test} in the YAML tags at the very top with this value.

## Training

The Common Voice `train`, `validation`, and ... datasets were used for training as well as ... and ...  # TODO: adapt to state all the datasets that were used for training.

The script used for training can be found [here](...)  # TODO: fill in a link to your training script here. If you trained your model in a colab, simply fill in the link here. If you trained the model locally, it would be great if you could upload the training script on github and paste the link here.

=======================To here===============================>

Your model is then available under *huggingface.co/{your_username}/{your_chosen_xlsr-large_model_name}* for everybody to use 🎉.

## How to evaluate my trained checkpoint

Having uploaded your model, you should now evaluate your model in a final step. This should be as simple as copying the evaluation code of your model card into a python script and running it. Make sure to note the final result on the model card **both** under the YAML tags at the very top **and** below your evaluation code under "Test Results".

## Rules of training and evaluation

In this section, we will quickly go over what data is allowed to be used as training data, what kind of data preprocessing is allowed to be used, and how the model should be evaluated.
To make it very simple regarding the first point: **All data except the official common voice `test` data set can be used as training data**. For models trained in a language that is not included in Common Voice, the author of the model is responsible for leaving a reasonable amount of data for evaluation.

Second, the rules regarding preprocessing are not quite as straightforward. It is allowed (and recommended) to normalize the data to only have lower-case characters. It is also allowed (and recommended) to remove typographical symbols and punctuation marks. A list of such symbols can *e.g.* be found [here](https://en.wikipedia.org/wiki/List_of_typographical_symbols_and_punctuation_marks) - however, here we already must be careful. We should **not** remove a symbol that would change the meaning of the words, *e.g.* in English, we should not remove the single quotation mark `'` since it would change the meaning of the word `"it's"` to `"its"`, which would then be incorrect. So the golden rule here is to not remove any characters that could change the meaning of a word into another word. This is not always obvious and should be given some consideration. As another example, it is fine to remove the "hyphen-minus" sign "`-`" since it doesn't change the meaning of a word to another one. *E.g.* "`fine-tuning`" would be changed to "`finetuning`", which still has the same meaning.

Since those choices are not always obvious, when in doubt feel free to ask on Slack or, even better, post on the forum, as was done, *e.g.* [here](https://discuss.huggingface.co/t/spanish-asr-fine-tuning-wav2vec2/4586).

## Tips and tricks

This section summarizes a couple of tips and tricks across various topics. It will continuously be updated during the week.

### How to combine multiple datasets into one

Check out [this](https://discuss.huggingface.co/t/how-to-combine-local-data-files-with-an-official-dataset/4685) post.

### How to effectively preprocess the data

### How to efficiently load datasets with limited RAM and hard drive space

Check out [this](https://discuss.huggingface.co/t/german-asr-fine-tuning-wav2vec2/4558/8?u=patrickvonplaten) post.

### How to do hyperparameter tuning

### How to preprocess and evaluate character based languages

## Further reading material

It is recommended that you take some time to read up on how Wav2vec2 works in theory. Getting a better understanding of the theory and the inner mechanisms of the model often helps when fine-tuning the model. **However**, if you don't like reading blog posts/papers, don't worry - it is by no means necessary to go through the theory to fine-tune Wav2Vec2 on your language of choice.

If you are interested in learning more about the model though, here are a couple of resources that are important to better understand Wav2Vec2:

- [Facebook's Wav2Vec2 blog post](https://ai.facebook.com/blog/wav2vec-state-of-the-art-speech-recognition-through-self-supervision/)
- [Official Wav2Vec2 paper](https://arxiv.org/abs/2006.11477)
- [Official XLSR Wav2vec2 paper](https://arxiv.org/pdf/2006.13979.pdf)
- [Hugging Face Blog](https://huggingface.co/blog/fine-tune-xlsr-wav2vec2)
- [How does CTC (Connectionist Temporal Classification) work](https://distill.pub/2017/ctc/)

It helps to have a good understanding of the following points:

- How was XLSR-Wav2Vec2 pretrained? -> Feature vectors were masked and had to be predicted by the model; very similar in spirit to BERT's masked language modeling.
- What parts of XLSR-Wav2Vec2 are responsible for what?
  What is the feature extractor part used for? -> extract feature vectors from the 1D raw audio waveform; What is the transformer part doing? -> mapping feature vectors to contextualized feature vectors; ...
- What part of the model needs to be fine-tuned? -> The pretrained model **does not** include a language head to classify the contextualized features to letters. This is randomly initialized when loading the pretrained checkpoint and has to be fine-tuned. Also, note that the authors recommend **not** to further fine-tune the feature extractor.
- What data was XLSR-Wav2Vec2 pretrained on? -> The checkpoint we will use for further fine-tuning was pretrained on **53** languages.
- What languages are considered to be similar by XLSR-Wav2Vec2? -> In the official [XLSR Wav2Vec2 paper](https://arxiv.org/pdf/2006.13979.pdf), the authors show nicely which languages share a common contextualized latent space. It might be useful for you to extend your training data with data of other languages that are considered to be very similar by the model (or by you).

## FAQ

- Can a participant fine-tune models for more than one language?
  Yes! A participant can fine-tune models in as many languages as she/he likes.
- Can a participant use extra data (apart from the common voice data)?
  Yes! All data except the official common voice `test data` can be used for training. If a participant wants to train a model on a language that is not part of Common Voice (which is very much encouraged!), the participant should make sure that some test data is held out to make sure the model is not overfitting.
- Can we fine-tune for high-resource languages?
  Yes! We do not really recommend fine-tuning models in English, since there are already so many fine-tuned speech recognition models in English. However, it is very much appreciated if participants want to fine-tune models in other "high-resource" languages, such as French, Spanish, or German. For such cases, one probably needs to train locally and might have to apply tricks such as lazy data loading (check the ["How to efficiently load datasets with limited RAM and hard drive space"](#how-to-efficiently-load-datasets-with-limited-ram-and-hard-drive-space) section for more details).
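As a purely illustrative addendum to the FAQ point about holding out evaluation data: if you train on a language that is not part of Common Voice, you can carve out a small test split yourself before training. The snippet below is only a sketch — the CSV file name, the column layout, and the 10% split size are assumptions, not part of the official rules.

```python
from datasets import load_dataset

# Hypothetical local corpus with e.g. "path" and "sentence" columns (adapt to your own data).
raw_dataset = load_dataset("csv", data_files={"data": "my_language_corpus.csv"})["data"]

# Hold out ~10% of the data so the model is never evaluated on sentences it was trained on.
splits = raw_dataset.train_test_split(test_size=0.1, seed=42)
train_dataset = splits["train"]
eval_dataset = splits["test"]

print(len(train_dataset), len(eval_dataset))
```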
transformers/examples/research_projects/wav2vec2/FINE_TUNE_XLSR_WAV2VEC2.md/0
{ "file_path": "transformers/examples/research_projects/wav2vec2/FINE_TUNE_XLSR_WAV2VEC2.md", "repo_id": "transformers", "token_count": 9587 }
306
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # XXX: we want transformers master here - in the absense of conftest manipulating sys.path: # hack it in for now: import sys from pathlib import Path git_repo_path = Path(__file__).resolve().parents[3] / "src" sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.integrations.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(42) models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"} ZERO2 = "zero2" ZERO3 = "zero3" stages = [ZERO2, ZERO3] def custom_name_func(func, param_num, param): # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args)) return f"{func.__name__}_{param_based_name}" # Cartesian-product of zero stages with models to test params = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class TestDeepSpeedWav2Vec2(TestCasePlus): @parameterized.expand(params, name_func=custom_name_func) def test_fp32_non_distributed(self, stage, model): self.run_and_check( stage=stage, model=model, distributed=False, fp16=False, ) @require_torch_multi_gpu @parameterized.expand(params, name_func=custom_name_func) def test_fp32_distributed(self, stage, model): self.run_and_check( stage=stage, model=model, distributed=True, fp16=False, ) @parameterized.expand(params, name_func=custom_name_func) def test_fp16_non_distributed(self, stage, model): self.run_and_check( stage=stage, model=model, distributed=False, fp16=True, ) @require_torch_multi_gpu @parameterized.expand(params, name_func=custom_name_func) def test_fp16_distributed(self, stage, model): self.run_and_check( stage=stage, model=model, distributed=True, fp16=True, ) def do_checks(self, output_dir): # XXX: run_asr is premature and doesn't save any results # so all we check for now is that the process didn't fail pass # XXX: need to do better validation beyond just that the run was successful def run_and_check( self, stage: str, model: str, eval_steps: int = 10, distributed: bool = True, quality_checks: bool = True, fp16: bool = True, ): model_name = models[model] output_dir = self.run_trainer( stage=stage, model_name=model_name, eval_steps=eval_steps, num_train_epochs=1, 
distributed=distributed, fp16=fp16, ) self.do_checks(output_dir) return output_dir def run_trainer( self, stage: str, model_name: str, eval_steps: int = 10, num_train_epochs: int = 1, distributed: bool = True, fp16: bool = True, ): output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False) args = f""" --model_name_or_path {model_name} --dataset_name hf-internal-testing/librispeech_asr_dummy --dataset_config_name clean --train_split_name validation --validation_split_name validation --output_dir {output_dir} --num_train_epochs {str(num_train_epochs)} --per_device_train_batch_size 2 --per_device_eval_batch_size 2 --evaluation_strategy steps --learning_rate 5e-4 --warmup_steps 8 --orthography timit --preprocessing_num_workers 1 --group_by_length --freeze_feature_extractor --report_to none --save_steps 0 --eval_steps {eval_steps} --report_to none """.split() if fp16: args.extend(["--fp16"]) # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split() script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"] launcher = self.get_launcher(distributed) cmd = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(cmd, env=self.get_env()) return output_dir def get_launcher(self, distributed=False): # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup # - it won't be able to handle that # 2. for now testing with just 2 gpus max (since some quality tests may give different # results with mode gpus because we use very little data) num_gpus = min(2, get_gpu_count()) if distributed else 1 return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
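# Note (added for clarity; not part of the original test file): because of
# `custom_name_func` above, each parameterized sub-test name embeds both the
# ZeRO stage and the model key, e.g. `test_fp32_non_distributed_zero2_base` or
# `test_fp16_distributed_zero3_robust`. Assuming a standard pytest setup, a
# single stage/model combination could therefore be selected with something like:
#   pytest test_wav2vec2_deepspeed.py -k "zero3_robust"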
transformers/examples/research_projects/wav2vec2/test_wav2vec2_deepspeed.py/0
{ "file_path": "transformers/examples/research_projects/wav2vec2/test_wav2vec2_deepspeed.py", "repo_id": "transformers", "token_count": 2788 }
307
#!/usr/bin/env python # coding=utf-8 # Copyright 2023 The HuggingFace Team All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Training a CLIP like dual encoder models using text and vision encoders in the library. The script can be used to train CLIP like models for languages other than English by using a text encoder pre-trained in the desired language. Currently this script supports the following vision and text models: Vision models: ViT(https://huggingface.co/models?filter=vit), CLIP (https://huggingface.co/models?filter=clip) Text models: BERT, ROBERTa (https://huggingface.co/models?filter=fill-mask) """ import logging import os import sys import warnings from dataclasses import dataclass, field from typing import Optional import tensorflow as tf from datasets import load_dataset from PIL import Image import transformers from transformers import ( AutoImageProcessor, AutoTokenizer, HfArgumentParser, PushToHubCallback, TFAutoModel, TFTrainingArguments, TFVisionTextDualEncoderModel, create_optimizer, ) from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.40.0.dev0") require_version( "datasets>=1.8.0", "To fix: pip install -r examples/tensorflow/contrastive-image-text/requirements.txt" ) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}, default=None ) vision_model_name_or_path: str = field( metadata={"help": "Path to pretrained image model or model identifier from huggingface.co/models"}, default=None, ) text_model_name_or_path: str = field( metadata={"help": "Path to pretrained text model or model identifier from huggingface.co/models"}, default=None ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."}) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) token: str = field( default=None, metadata={ "help": ( "The token to use as HTTP bearer authorization for remote files. 
If not specified, will use the token " "generated when running `huggingface-cli login` (stored in `~/.huggingface`)." ) }, ) use_auth_token: bool = field( default=None, metadata={ "help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token` instead." }, ) trust_remote_code: bool = field( default=False, metadata={ "help": ( "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option " "should only be set to `True` for repositories you trust and in which you have read the code, as it will " "execute code present on the Hub on your local machine." ) }, ) freeze_vision_model: bool = field( default=False, metadata={"help": "Whether to freeze the vision model parameters or not."} ) freeze_text_model: bool = field( default=False, metadata={"help": "Whether to freeze the text model parameters or not."} ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) data_dir: Optional[str] = field(default=None, metadata={"help": "The data directory containing input files."}) image_column: Optional[str] = field( default="image_path", metadata={"help": "The name of the column in the datasets containing the full image file paths."}, ) caption_column: Optional[str] = field( default="caption", metadata={"help": "The name of the column in the datasets containing the image captions."}, ) train_file: Optional[str] = field( default=None, metadata={"help": "The input training data file (a jsonlines file)."} ) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file (a jsonlines file)."}, ) test_file: Optional[str] = field( default=None, metadata={"help": "An optional input testing data file (a jsonlines file)."}, ) max_seq_length: Optional[int] = field( default=128, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) def __post_init__(self): if self.dataset_name is None and self.train_file is None and self.validation_file is None: raise ValueError("Need either a dataset name or a training/validation file.") else: if self.train_file is not None: extension = self.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." 
if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension == "json", "`validation_file` should be a json file." dataset_name_mapping = { "image_caption_dataset.py": ("image_path", "caption"), } def crop_to_square(image): height, width = tf.shape(image)[0], tf.shape(image)[1] if height > width: image = tf.image.crop_to_bounding_box(image, (height - width) // 2, 0, width, width) elif width > height: image = tf.image.crop_to_bounding_box(image, 0, (width - height) // 2, height, height) return image def load_as_tf_dataset(dataset, image_column, image_size, mean, std, batch_size, shuffle): dataset = dataset.with_format("tensorflow")[:] # Load the dataset as tensor slices, but not the images yet! tf_dataset = tf.data.Dataset.from_tensor_slices(dataset) def load_image(sample): image_path = sample[image_column] image = tf.io.read_file(image_path) image = tf.image.decode_image(image, channels=3, expand_animations=False) image = crop_to_square(image) image = tf.image.resize(image, [image_size, image_size], method="bicubic", antialias=True) image = image / 255.0 image = (image - mean) / std image = tf.transpose(image, perm=[2, 0, 1]) # Convert to channels-first sample["pixel_values"] = image del sample[image_column] return sample if shuffle: tf_dataset = tf_dataset.shuffle(len(tf_dataset)) tf_dataset = tf_dataset.map(load_image, num_parallel_calls=tf.data.experimental.AUTOTUNE) tf_dataset = tf_dataset.batch(batch_size, drop_remainder=shuffle) tf_dataset = tf_dataset.prefetch(tf.data.experimental.AUTOTUNE) return tf_dataset def main(): # 1. Parse input arguments # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() if model_args.use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token` instead.", FutureWarning, ) if model_args.token is not None: raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") model_args.token = model_args.use_auth_token if model_args.model_name_or_path is not None: if model_args.vision_model_name_or_path is not None or model_args.text_model_name_or_path is not None: raise ValueError( "If using model_name_or_path, you cannot specify separate image/text model paths as well!" ) if model_args.vision_model_name_or_path is not None or model_args.text_model_name_or_path is not None: if model_args.model_name_or_path is not None: raise ValueError( "If using separate image/text model paths, you cannot specify model_name_or_path as well!" ) if not (model_args.vision_model_name_or_path is not None and model_args.text_model_name_or_path is not None): raise ValueError( "If using separate image/text model paths, you must specify both vision_model_name_or_path " "and text_model_name_or_path!" ) # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. 
The # information sent is the one passed as arguments along with your Python/TensorFlow versions. send_example_telemetry("run_clip", model_args, data_args, framework="tensorflow") # 2. Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() log_level = training_args.get_process_log_level() logger.setLevel(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.info(f"Training/evaluation parameters {training_args}") # 3. Detecting last checkpoint and eventually continue from last checkpoint last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # 4. Load dataset # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files this script will use the first column for the full image path and the second column for the # captions (unless you specify column names for this with the `image_column` and `caption_column` arguments). # if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. dataset = load_dataset( data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, keep_in_memory=False, data_dir=data_args.data_dir, token=model_args.token, ) else: data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file extension = data_args.train_file.split(".")[-1] if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = data_args.validation_file.split(".")[-1] if data_args.test_file is not None: data_files["test"] = data_args.test_file extension = data_args.test_file.split(".")[-1] dataset = load_dataset( extension, data_files=data_files, cache_dir=model_args.cache_dir, token=model_args.token, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets. # 5. 
Load pretrained model, tokenizer, and image processor if model_args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) elif model_args.model_name_or_path: tokenizer = AutoTokenizer.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) elif model_args.text_model_name_or_path: tokenizer = AutoTokenizer.from_pretrained( model_args.text_model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script. " "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) if model_args.model_name_or_path: # Load image_processor, in this script we only use this to get the mean and std for normalization. image_processor = AutoImageProcessor.from_pretrained( model_args.image_processor_name or model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) with training_args.strategy.scope(): model = TFAutoModel.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) else: # Load image_processor, in this script we only use this to get the mean and std for normalization. image_processor = AutoImageProcessor.from_pretrained( model_args.image_processor_name or model_args.vision_model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) with training_args.strategy.scope(): model = TFVisionTextDualEncoderModel.from_vision_text_pretrained( vision_model_name_or_path=model_args.vision_model_name_or_path, text_model_name_or_path=model_args.text_model_name_or_path, cache_dir=model_args.cache_dir, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) config = model.config if model_args.freeze_vision_model: model.vision_model.trainable = False if model_args.freeze_text_model: model.text_model.trainable = False # Preprocessing the datasets. # We need to tokenize inputs and targets. if training_args.do_train: column_names = dataset["train"].column_names elif training_args.do_eval: column_names = dataset["validation"].column_names elif training_args.do_predict: column_names = dataset["test"].column_names else: logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.") return # 6. Get the column names for input/target. 
dataset_columns = dataset_name_mapping.get(data_args.dataset_name, None) if data_args.image_column is None: image_column = dataset_columns[0] if dataset_columns is not None else column_names[0] else: image_column = data_args.image_column if image_column not in column_names: raise ValueError( f"--image_column' value '{data_args.image_column}' needs to be one of: {', '.join(column_names)}" ) if data_args.caption_column is None: caption_column = dataset_columns[1] if dataset_columns is not None else column_names[1] else: caption_column = data_args.caption_column if caption_column not in column_names: raise ValueError( f"--caption_column' value '{data_args.caption_column}' needs to be one of: {', '.join(column_names)}" ) # # 7. Preprocessing the datasets. # We need to tokenize input captions and transform the images. def tokenize_captions(examples): captions = list(examples[caption_column]) text_inputs = tokenizer(captions, max_length=data_args.max_seq_length, padding="max_length", truncation=True) examples["input_ids"] = text_inputs.input_ids examples["attention_mask"] = text_inputs.attention_mask return examples def filter_corrupt_images(examples): """remove problematic images""" valid_images = [] for image_file in examples[image_column]: try: Image.open(image_file) valid_images.append(True) except Exception: valid_images.append(False) return valid_images if training_args.do_train: if "train" not in dataset: raise ValueError("--do_train requires a train dataset") train_dataset = dataset["train"] if data_args.max_train_samples is not None: max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) train_dataset = train_dataset.filter( filter_corrupt_images, batched=True, num_proc=data_args.preprocessing_num_workers ) train_dataset = train_dataset.map( function=tokenize_captions, batched=True, remove_columns=[col for col in column_names if col != image_column], num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on train dataset", ) tf_train_dataset = load_as_tf_dataset( dataset=train_dataset, batch_size=training_args.per_device_train_batch_size, image_column=image_column, image_size=config.vision_config.image_size, mean=image_processor.image_mean, std=image_processor.image_std, shuffle=True, ) if training_args.do_eval: if "validation" not in dataset: raise ValueError("--do_eval requires a train validation") eval_dataset = dataset["validation"] if data_args.max_eval_samples is not None: max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) eval_dataset = eval_dataset.filter( filter_corrupt_images, batched=True, num_proc=data_args.preprocessing_num_workers ) eval_dataset = eval_dataset.map( function=tokenize_captions, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=[col for col in column_names if col != image_column], load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on validation dataset", ) tf_eval_dataset = load_as_tf_dataset( dataset=eval_dataset, batch_size=training_args.per_device_eval_batch_size, image_column=image_column, image_size=config.vision_config.image_size, mean=image_processor.image_mean, std=image_processor.image_std, shuffle=False, ) # 8. 
Preparing push_to_hub and model card push_to_hub_model_id = training_args.push_to_hub_model_id if model_args.model_name_or_path is not None: model_name = model_args.model_name_or_path.split("/")[-1] else: vision_name = model_args.vision_model_name_or_path.split("/")[-1] text_name = model_args.text_model_name_or_path.split("/")[-1] model_name = f"{vision_name}-{text_name}" if not push_to_hub_model_id: if data_args.dataset_name is not None: push_to_hub_model_id = f"{model_name}-finetuned-{data_args.dataset_name}" else: push_to_hub_model_id = f"{model_name}-finetuned-contrastive-image-text-modeling" model_card_kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "contrastive-image-text-modeling"} if data_args.dataset_name is not None: model_card_kwargs["dataset_tags"] = data_args.dataset_name if data_args.dataset_config_name is not None: model_card_kwargs["dataset_args"] = data_args.dataset_config_name model_card_kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" else: model_card_kwargs["dataset"] = data_args.dataset_name if training_args.push_to_hub: callbacks = [ PushToHubCallback( output_dir=training_args.output_dir, hub_model_id=push_to_hub_model_id, hub_token=training_args.push_to_hub_token, tokenizer=tokenizer, **model_card_kwargs, ) ] else: callbacks = [] # # 9. Training if training_args.do_train: num_train_steps = int(len(tf_train_dataset) * int(training_args.num_train_epochs)) if training_args.warmup_steps > 0: num_warmup_steps = training_args.warmup_steps elif training_args.warmup_ratio > 0: num_warmup_steps = int(num_train_steps * training_args.warmup_ratio) else: num_warmup_steps = 0 optimizer, lr_schedule = create_optimizer( init_lr=training_args.learning_rate, num_train_steps=num_train_steps, num_warmup_steps=num_warmup_steps, adam_beta1=training_args.adam_beta1, adam_beta2=training_args.adam_beta2, adam_epsilon=training_args.adam_epsilon, weight_decay_rate=training_args.weight_decay, adam_global_clipnorm=training_args.max_grad_norm, ) # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). model.compile(optimizer=optimizer, jit_compile=training_args.xla) if not training_args.do_eval: tf_eval_dataset = None model.fit( tf_train_dataset, validation_data=tf_eval_dataset, epochs=int(training_args.num_train_epochs), callbacks=callbacks, ) # # 10. Evaluation if training_args.do_eval and not training_args.do_train: model.evaluate(tf_eval_dataset) if __name__ == "__main__": main()
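As a quick sanity check of the dual-encoder path used above, the following is a minimal sketch that instantiates the same kind of model outside the training script. The checkpoint names are placeholders chosen for illustration, and the random image batch merely mimics the channels-first layout produced by `load_as_tf_dataset`; this is not part of the script itself.

```python
import tensorflow as tf

from transformers import AutoTokenizer, TFVisionTextDualEncoderModel

# Illustrative checkpoints; any compatible vision/text pair should work here.
model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
    vision_model_name_or_path="google/vit-base-patch16-224-in21k",
    text_model_name_or_path="google-bert/bert-base-uncased",
)
tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")

text_inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="tf")
image_size = model.config.vision_config.image_size
pixel_values = tf.random.uniform((2, 3, image_size, image_size))  # channels-first dummy batch

outputs = model(
    input_ids=text_inputs.input_ids,
    attention_mask=text_inputs.attention_mask,
    pixel_values=pixel_values,
)
print(outputs.logits_per_image.shape)  # (num_images, num_texts) similarity matrix
```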
transformers/examples/tensorflow/contrastive-image-text/run_clip.py/0
{ "file_path": "transformers/examples/tensorflow/contrastive-image-text/run_clip.py", "repo_id": "transformers", "token_count": 11224 }
308
<!---
Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# Question answering example

This folder contains the `run_qa.py` script, demonstrating *question answering* with the 🤗 Transformers library.
For straightforward use cases you may be able to use this script without modification, although we have also
included comments in the code to indicate areas that you may need to adapt to your own projects.

### Usage notes

Note that when contexts are long they may be split into multiple training cases, not all of which may contain
the answer span (the sketch after the example command below illustrates this splitting).

As-is, the example script will train on SQuAD or any other question-answering dataset formatted the same way, and can
handle user inputs as well.

### Multi-GPU and TPU usage

By default, the script uses a `MirroredStrategy` and will use multiple GPUs effectively if they are available. TPUs
can also be used by passing the name of the TPU resource with the `--tpu` argument. There are some issues surrounding
these strategies and our models right now, which are most likely to appear in the evaluation/prediction steps. We're
actively working on better support for multi-GPU and TPU training in TF, but if you encounter problems a quick
workaround is to train in the multi-GPU or TPU context and then perform predictions outside of it.

### Memory usage and data loading

One thing to note is that all data is loaded into memory in this script. Most question answering datasets are small
enough that this is not an issue, but if you have a very large dataset you will need to modify the script to handle
data streaming. This is particularly challenging for TPUs, given the stricter requirements and the sheer volume of data
required to keep them fed. A full explanation of all the possible pitfalls is a bit beyond this example script and
README, but for more information you can see the 'Input Datasets' section of
[this document](https://www.tensorflow.org/guide/tpu).

### Example command

```bash
python run_qa.py \
--model_name_or_path distilbert/distilbert-base-cased \
--output_dir output \
--dataset_name squad \
--do_train \
--do_eval
```
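For readers who want to see what the context splitting mentioned in the usage notes looks like in practice, here is a minimal sketch using the fast-tokenizer overflow API. The checkpoint, `max_length` and `stride` values are illustrative and not necessarily the exact settings used inside `run_qa.py`.

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-cased")

question = "Who wrote the novel?"
long_context = "word " * 2000  # stands in for a context much longer than max_length

features = tokenizer(
    question,
    long_context,
    max_length=384,
    stride=128,                      # overlap kept between consecutive chunks
    truncation="only_second",        # truncate only the context, never the question
    return_overflowing_tokens=True,  # emit one feature per chunk
)

# One long example becomes several training cases; not all of them contain the answer span.
print(len(features["input_ids"]))
```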
transformers/examples/tensorflow/question-answering/README.md/0
{ "file_path": "transformers/examples/tensorflow/question-answering/README.md", "repo_id": "transformers", "token_count": 657 }
309
#!/usr/bin/env python
# coding: utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeps the
# full vocab and merges file, and thus also results in a larger model due to the large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a model ~50 times smaller than this one, see `fsmt-make-super-tiny-model.py`, which is slightly more complicated.
#
#
# It will then be used as "stas/tiny-wmt19-en-de"

# Build
from transformers import FSMTConfig, FSMTForConditionalGeneration, FSMTTokenizer

mname = "facebook/wmt19-en-de"

tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update({
    "d_model": 4,
    "encoder_layers": 1, "decoder_layers": 1,
    "encoder_ffn_dim": 4, "decoder_ffn_dim": 4,
    "encoder_attention_heads": 1, "decoder_attention_heads": 1,
})

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-de
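A possible smoke test for the artifact the script just wrote is sketched below. It assumes the script above has already been run in the current directory; the input sentence is arbitrary, and since the tiny model is randomly initialized the generated text is meaningless.

```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer

mname_tiny = "tiny-wmt19-en-de"  # the local folder saved by the script above

tokenizer = FSMTTokenizer.from_pretrained(mname_tiny)
model = FSMTForConditionalGeneration.from_pretrained(mname_tiny).float()  # upcast the saved fp16 weights for CPU use

batch = tokenizer(["Machine learning is great"], return_tensors="pt")
generated = model.generate(**batch, max_new_tokens=5, num_beams=1)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))  # gibberish, but proves the plumbing works
```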
transformers/scripts/fsmt/fsmt-make-tiny-model.py/0
{ "file_path": "transformers/scripts/fsmt/fsmt-make-tiny-model.py", "repo_id": "transformers", "token_count": 704 }
310
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Benchmarking the library on inference and training in PyTorch. """ import timeit from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_auto import MODEL_MAPPING, MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_py3nvml_available, is_torch_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_torch_available(): import torch from .benchmark_args import PyTorchBenchmarkArguments if is_py3nvml_available(): import py3nvml.py3nvml as nvml logger = logging.get_logger(__name__) class PyTorchBenchmark(Benchmark): args: PyTorchBenchmarkArguments configs: PretrainedConfig framework: str = "PyTorch" @property def framework_version(self): return torch.__version__ def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float: _inference = self._prepare_inference_func(model_name, batch_size, sequence_length) return self._measure_speed(_inference) def _inference_memory( self, model_name: str, batch_size: int, sequence_length: int ) -> [Memory, Optional[MemorySummary]]: _inference = self._prepare_inference_func(model_name, batch_size, sequence_length) return self._measure_memory(_inference) def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float: _train = self._prepare_train_func(model_name, batch_size, sequence_length) return self._measure_speed(_train) def _train_memory( self, model_name: str, batch_size: int, sequence_length: int ) -> [Memory, Optional[MemorySummary]]: _train = self._prepare_train_func(model_name, batch_size, sequence_length) return self._measure_memory(_train) def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]: config = self.config_dict[model_name] if self.args.torchscript: config.torchscript = True has_model_class_in_config = ( hasattr(config, "architectures") and isinstance(config.architectures, list) and len(config.architectures) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: model_class = config.architectures[0] transformers_module = __import__("transformers", fromlist=[model_class]) model_cls = getattr(transformers_module, model_class) model = model_cls(config) except ImportError: raise ImportError( f"{model_class} does not exist. If you just want to test the pretrained model, you might want to" " set `--only_pretrain_model` or `args.only_pretrain_model=True`." 
) else: model = MODEL_MAPPING[config.__class__](config) model.eval() model.to(self.args.device) # encoder-decoder has vocab size saved differently vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size input_ids = torch.randint(vocab_size, (batch_size, sequence_length), dtype=torch.long, device=self.args.device) if self.args.fp16: logger.info("Running training in Mixed Precision...") if not self.args.is_gpu: raise ValueError("Mixed precision is possible only for GPU.") # amp seems to have memory leaks so that memory usage # is measured using .half() for now https://github.com/NVIDIA/apex/issues/439 model.half() if self.args.torchscript: with torch.no_grad(): inference_model = torch.jit.trace(model, input_ids) else: inference_model = model def encoder_decoder_forward(): with torch.no_grad(): outputs = inference_model(input_ids, decoder_input_ids=input_ids) return outputs def encoder_forward(): with torch.no_grad(): outputs = inference_model(input_ids) return outputs _forward = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _forward def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]: config = self.config_dict[model_name] has_model_class_in_config = ( hasattr(config, "architectures") and isinstance(config.architectures, list) and len(config.architectures) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: model_class = config.architectures[0] transformers_module = __import__("transformers", fromlist=[model_class]) model_cls = getattr(transformers_module, model_class) model = model_cls(config) except ImportError: raise ImportError( f"{model_class} does not exist. If you just want to test the pretrained model, you might want to" " set `--only_pretrain_model` or `args.only_pretrain_model=True`." ) else: model = MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config) if self.args.torchscript: raise NotImplementedError("Training for torchscript is currently not implemented") else: train_model = model model.train() model.to(self.args.device) # encoder-decoder has vocab size saved differently vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size input_ids = torch.randint(vocab_size, (batch_size, sequence_length), dtype=torch.long, device=self.args.device) if self.args.fp16: logger.info("Running training in Mixed Precision...") if not self.args.is_gpu: raise ValueError("Mixed precision is possible only for GPU.") # amp seems to have memory leaks so that memory usage # is measured using .half() for now https://github.com/NVIDIA/apex/issues/439 model.half() def compute_loss_and_backprob_encoder(): loss = train_model(input_ids, labels=input_ids)[0] loss.backward() return loss def compute_loss_and_backprob_encoder_decoder(): loss = train_model(input_ids, decoder_input_ids=input_ids, labels=input_ids)[0] loss.backward() return loss _train = ( compute_loss_and_backprob_encoder_decoder if config.is_encoder_decoder else compute_loss_and_backprob_encoder ) return _train def _measure_speed(self, func) -> float: try: if self.args.is_tpu or self.args.torchscript: # run additional 10 times to stabilize compilation for tpu and torchscript logger.info("Do inference on TPU or torchscript. 
Running model 5 times to stabilize compilation") timeit.repeat( func, repeat=1, number=5, ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average runtimes = timeit.repeat( func, repeat=self.args.repeat, number=10, ) if self.args.is_tpu and self.args.torch_xla_tpu_print_metrics: import torch_xla.debug.metrics as met self.print_fn(met.metrics_report()) return min(runtimes) / 10.0 except RuntimeError as e: self.print_fn(f"Doesn't fit on GPU. {e}") return "N/A" def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]: try: if self.args.trace_memory_line_by_line: trace = start_memory_tracing("transformers") if self.args.is_tpu: # tpu raise NotImplementedError( "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking with" " `--no-memory` or `args.memory=False`" ) elif self.args.is_gpu: if not is_py3nvml_available(): logger.warning( "py3nvml not installed, we won't log GPU memory usage. " "Install py3nvml (pip install py3nvml) to log information about GPU." ) memory = "N/A" else: logger.info( "Measuring total GPU usage on GPU device. Make sure to not have additional processes running" " on the same GPU." ) # init nvml nvml.nvmlInit() func() handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx) meminfo = nvml.nvmlDeviceGetMemoryInfo(handle) max_bytes_in_use = meminfo.used memory = Memory(max_bytes_in_use) # shutdown nvml nvml.nvmlShutdown() else: # cpu memory_bytes = measure_peak_memory_cpu(func) memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes if self.args.trace_memory_line_by_line: summary = stop_memory_tracing(trace) else: summary = None return memory, summary except RuntimeError as e: self.print_fn(f"Doesn't fit on GPU. {e}") return "N/A", None
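This class is normally driven through `PyTorchBenchmarkArguments`; the sketch below shows one plausible invocation, with the model name, batch sizes and sequence lengths chosen purely for illustration. Note that these benchmark utilities are deprecated in recent releases, so treat this as an example of the interface rather than a recommendation.

```python
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["google-bert/bert-base-uncased"],  # any model identifier(s) from the Hub
    batch_sizes=[1],
    sequence_lengths=[8, 32],
)

benchmark = PyTorchBenchmark(args)
results = benchmark.run()  # prints and returns inference speed/memory tables
```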
transformers/src/transformers/benchmark/benchmark.py/0
{ "file_path": "transformers/src/transformers/benchmark/benchmark.py", "repo_id": "transformers", "token_count": 4892 }
311
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentParser, Namespace from typing import Any, List, Optional from ..pipelines import Pipeline, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand try: from fastapi import Body, FastAPI, HTTPException from fastapi.routing import APIRoute from pydantic import BaseModel from starlette.responses import JSONResponse from uvicorn import run _serve_dependencies_installed = True except (ImportError, AttributeError): BaseModel = object def Body(*x, **y): pass _serve_dependencies_installed = False logger = logging.get_logger("transformers-cli/serving") def serve_command_factory(args: Namespace): """ Factory function used to instantiate serving server from provided command line arguments. Returns: ServeCommand """ nlp = pipeline( task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device, ) return ServeCommand(nlp, args.host, args.port, args.workers) class ServeModelInfoResult(BaseModel): """ Expose model information """ infos: dict class ServeTokenizeResult(BaseModel): """ Tokenize result model """ tokens: List[str] tokens_ids: Optional[List[int]] class ServeDeTokenizeResult(BaseModel): """ DeTokenize result model """ text: str class ServeForwardResult(BaseModel): """ Forward result model """ output: Any class ServeCommand(BaseTransformersCLICommand): @staticmethod def register_subcommand(parser: ArgumentParser): """ Register this command to argparse so it's available for the transformer-cli Args: parser: Root parser to register command-specific arguments """ serve_parser = parser.add_parser( "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints." ) serve_parser.add_argument( "--task", type=str, choices=get_supported_tasks(), help="The task to run the pipeline on", ) serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.") serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.") serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers") serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.") serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.") serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.") serve_parser.add_argument( "--device", type=int, default=-1, help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)", ) serve_parser.set_defaults(func=serve_command_factory) def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int): self._pipeline = pipeline self.host = host self.port = port self.workers = workers if not _serve_dependencies_installed: raise RuntimeError( "Using serve command requires FastAPI and uvicorn. 
" 'Please install transformers with [serving]: pip install "transformers[serving]". ' "Or install FastAPI and uvicorn separately." ) else: logger.info(f"Serving model over {host}:{port}") self._app = FastAPI( routes=[ APIRoute( "/", self.model_info, response_model=ServeModelInfoResult, response_class=JSONResponse, methods=["GET"], ), APIRoute( "/tokenize", self.tokenize, response_model=ServeTokenizeResult, response_class=JSONResponse, methods=["POST"], ), APIRoute( "/detokenize", self.detokenize, response_model=ServeDeTokenizeResult, response_class=JSONResponse, methods=["POST"], ), APIRoute( "/forward", self.forward, response_model=ServeForwardResult, response_class=JSONResponse, methods=["POST"], ), ], timeout=600, ) def run(self): run(self._app, host=self.host, port=self.port, workers=self.workers) def model_info(self): return ServeModelInfoResult(infos=vars(self._pipeline.model.config)) def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)): """ Tokenize the provided input and eventually returns corresponding tokens id: - **text_input**: String to tokenize - **return_ids**: Boolean flags indicating if the tokens have to be converted to their integer mapping. """ try: tokens_txt = self._pipeline.tokenizer.tokenize(text_input) if return_ids: tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt) return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids) else: return ServeTokenizeResult(tokens=tokens_txt) except Exception as e: raise HTTPException(status_code=500, detail={"model": "", "error": str(e)}) def detokenize( self, tokens_ids: List[int] = Body(None, embed=True), skip_special_tokens: bool = Body(False, embed=True), cleanup_tokenization_spaces: bool = Body(True, embed=True), ): """ Detokenize the provided tokens ids to readable text: - **tokens_ids**: List of tokens ids - **skip_special_tokens**: Flag indicating to not try to decode special tokens - **cleanup_tokenization_spaces**: Flag indicating to remove all leading/trailing spaces and intermediate ones. """ try: decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces) return ServeDeTokenizeResult(model="", text=decoded_str) except Exception as e: raise HTTPException(status_code=500, detail={"model": "", "error": str(e)}) async def forward(self, inputs=Body(None, embed=True)): """ **inputs**: **attention_mask**: **tokens_type_ids**: """ # Check we don't have empty string if len(inputs) == 0: return ServeForwardResult(output=[], attention=[]) try: # Forward through the model output = self._pipeline(inputs) return ServeForwardResult(output=output) except Exception as e: raise HTTPException(500, {"error": str(e)})
transformers/src/transformers/commands/serving.py/0
{ "file_path": "transformers/src/transformers/commands/serving.py", "repo_id": "transformers", "token_count": 3477 }
312
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from ...utils import is_sklearn_available, requires_backends if is_sklearn_available(): from scipy.stats import pearsonr, spearmanr from sklearn.metrics import f1_score, matthews_corrcoef DEPRECATION_WARNING = ( "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate " "library. You can have a look at this example script for pointers: " "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py" ) def simple_accuracy(preds, labels): warnings.warn(DEPRECATION_WARNING, FutureWarning) requires_backends(simple_accuracy, "sklearn") return (preds == labels).mean() def acc_and_f1(preds, labels): warnings.warn(DEPRECATION_WARNING, FutureWarning) requires_backends(acc_and_f1, "sklearn") acc = simple_accuracy(preds, labels) f1 = f1_score(y_true=labels, y_pred=preds) return { "acc": acc, "f1": f1, "acc_and_f1": (acc + f1) / 2, } def pearson_and_spearman(preds, labels): warnings.warn(DEPRECATION_WARNING, FutureWarning) requires_backends(pearson_and_spearman, "sklearn") pearson_corr = pearsonr(preds, labels)[0] spearman_corr = spearmanr(preds, labels)[0] return { "pearson": pearson_corr, "spearmanr": spearman_corr, "corr": (pearson_corr + spearman_corr) / 2, } def glue_compute_metrics(task_name, preds, labels): warnings.warn(DEPRECATION_WARNING, FutureWarning) requires_backends(glue_compute_metrics, "sklearn") assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}" if task_name == "cola": return {"mcc": matthews_corrcoef(labels, preds)} elif task_name == "sst-2": return {"acc": simple_accuracy(preds, labels)} elif task_name == "mrpc": return acc_and_f1(preds, labels) elif task_name == "sts-b": return pearson_and_spearman(preds, labels) elif task_name == "qqp": return acc_and_f1(preds, labels) elif task_name == "mnli": return {"mnli/acc": simple_accuracy(preds, labels)} elif task_name == "mnli-mm": return {"mnli-mm/acc": simple_accuracy(preds, labels)} elif task_name == "qnli": return {"acc": simple_accuracy(preds, labels)} elif task_name == "rte": return {"acc": simple_accuracy(preds, labels)} elif task_name == "wnli": return {"acc": simple_accuracy(preds, labels)} elif task_name == "hans": return {"acc": simple_accuracy(preds, labels)} else: raise KeyError(task_name) def xnli_compute_metrics(task_name, preds, labels): warnings.warn(DEPRECATION_WARNING, FutureWarning) requires_backends(xnli_compute_metrics, "sklearn") if len(preds) != len(labels): raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}") if task_name == "xnli": return {"acc": simple_accuracy(preds, labels)} else: raise KeyError(task_name)
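A brief usage sketch follows; as the deprecation warning above says, these helpers are being replaced by the 🤗 Evaluate library, and they require `scikit-learn`/`scipy`. The toy arrays stand in for argmax-ed model predictions and gold labels.

```python
import numpy as np

from transformers.data.metrics import glue_compute_metrics

preds = np.array([1, 0, 1, 1])   # e.g. np.argmax(logits, axis=1)
labels = np.array([1, 0, 0, 1])

print(glue_compute_metrics("mrpc", preds, labels))
# {'acc': 0.75, 'f1': 0.8, 'acc_and_f1': 0.775}
```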
transformers/src/transformers/data/metrics/__init__.py/0
{ "file_path": "transformers/src/transformers/data/metrics/__init__.py", "repo_id": "transformers", "token_count": 1413 }
313
from abc import ABC, abstractmethod from typing import List, Optional class Constraint(ABC): r"""Abstract base class for all constraints that can be applied during generation. It must define how the constraint can be satisfied. All classes that inherit Constraint must follow the requirement that ```py completed = False while not completed: _, completed = constraint.update(constraint.advance()) ``` will always terminate (halt). """ def __init__(self): # test for the above condition self.test() def test(self): """ Tests whether this constraint has been properly defined. """ counter = 0 completed = False while not completed: if counter == 1: self.reset() advance = self.advance() if not self.does_advance(advance): raise Exception( "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true." ) stepped, completed, reset = self.update(advance) counter += 1 if counter > 10000: raise Exception("update() does not fulfill the constraint.") if self.remaining() != 0: raise Exception("Custom Constraint is not defined correctly.") @abstractmethod def advance(self): """ When called, returns the token that would take this constraint one step closer to being fulfilled. Return: token_ids(`torch.tensor`): Must be a tensor of a list of indexable tokens, not some integer. """ raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." ) @abstractmethod def does_advance(self, token_id: int): """ Reads in a token and returns whether it creates progress. """ raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." ) @abstractmethod def update(self, token_id: int): """ Reads in a token and returns booleans that indicate the progress made by it. This function will update the state of this object unlikes `does_advance(self, token_id: int)`. This isn't to test whether a certain token will advance the progress; it's to update its state as if it has been generated. This becomes important if token_id != desired token (refer to else statement in PhrasalConstraint) Args: token_id(`int`): The id of a newly generated token in the beam search. Return: stepped(`bool`): Whether this constraint has become one step closer to being fulfuilled. completed(`bool`): Whether this constraint has been completely fulfilled by this token being generated. reset (`bool`): Whether this constraint has reset its progress by this token being generated. """ raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." ) @abstractmethod def reset(self): """ Resets the state of this constraint to its initialization. We would call this in cases where the fulfillment of a constraint is abrupted by an unwanted token. """ raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." ) @abstractmethod def remaining(self): """ Returns the number of remaining steps of `advance()` in order to complete this constraint. """ raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." ) @abstractmethod def copy(self, stateful=False): """ Creates a new instance of this constraint. Args: stateful(`bool`): Whether to not only copy the constraint for new instance, but also its state. Return: constraint(`Constraint`): The same constraint as the one being called from. """ raise NotImplementedError( f"{self.__class__} is an abstract class. 
Only classes inheriting this class can be called." ) class PhrasalConstraint(Constraint): r""" [`Constraint`] enforcing that an ordered sequence of tokens is included in the output. Args: token_ids (`List[int]`): The id of the token that must be generated by the output. """ def __init__(self, token_ids: List[int]): super(Constraint, self).__init__() if not isinstance(token_ids, list) or len(token_ids) == 0: raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.") if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids): raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.") self.token_ids = token_ids self.seqlen = len(self.token_ids) self.fulfilled_idx = -1 # the index of the currently fulfilled step self.completed = False def advance(self): if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def does_advance(self, token_id: int): if not isinstance(token_id, int): raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}") if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def update(self, token_id: int): if not isinstance(token_id, int): raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}") stepped = False completed = False reset = False if self.does_advance(token_id): self.fulfilled_idx += 1 stepped = True if self.fulfilled_idx == (self.seqlen - 1): completed = True self.completed = completed else: # failed to make progress. reset = True self.reset() return stepped, completed, reset def reset(self): self.completed = False self.fulfilled_idx = 0 def remaining(self): return self.seqlen - (self.fulfilled_idx + 1) def copy(self, stateful=False): new_constraint = PhrasalConstraint(self.token_ids) if stateful: new_constraint.seq_len = self.seqlen new_constraint.fulfilled_idx = self.fulfilled_idx new_constraint.completed = self.completed return new_constraint class DisjunctiveTrie: def __init__(self, nested_token_ids: List[List[int]], no_subsets=True): r""" A helper class that builds a trie with the words represented in `nested_token_ids`. """ self.max_height = max([len(one) for one in nested_token_ids]) root = {} for token_ids in nested_token_ids: level = root for tidx, token_id in enumerate(token_ids): if token_id not in level: level[token_id] = {} level = level[token_id] if no_subsets and self.has_subsets(root, nested_token_ids): raise ValueError( "Each list in `nested_token_ids` can't be a complete subset of another list, but is" f" {nested_token_ids}." ) self.trie = root def next_tokens(self, current_seq): """ The next possible tokens that will progress the trie, given the current sequence of tokens in `current_seq`. """ start = self.trie for current_token in current_seq: start = start[current_token] next_tokens = list(start.keys()) return next_tokens def reached_leaf(self, current_seq): next_tokens = self.next_tokens(current_seq) return len(next_tokens) == 0 def count_leaves(self, root): next_nodes = list(root.values()) if len(next_nodes) == 0: return 1 else: return sum([self.count_leaves(nn) for nn in next_nodes]) def has_subsets(self, trie, nested_token_ids): """ Returns whether # of leaves == # of words. Otherwise some word is a subset of another. 
""" leaf_count = self.count_leaves(trie) return len(nested_token_ids) != leaf_count class DisjunctiveConstraint(Constraint): r""" A special [`Constraint`] that is fulfilled by fulfilling just one of several constraints. Args: nested_token_ids (`List[List[int]]`): A list of words, where each word is a list of ids. This constraint is fulfilled by generating just one from the list of words. """ def __init__(self, nested_token_ids: List[List[int]]): super(Constraint, self).__init__() if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0: raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.") if any(not isinstance(token_ids, list) for token_ids in nested_token_ids): raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.") if any( any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids) for token_ids in nested_token_ids ): raise ValueError( f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}." ) self.trie = DisjunctiveTrie(nested_token_ids) self.token_ids = nested_token_ids self.seqlen = self.trie.max_height self.current_seq = [] self.completed = False def advance(self): token_list = self.trie.next_tokens(self.current_seq) if len(token_list) == 0: return None else: return token_list def does_advance(self, token_id: int): if not isinstance(token_id, int): raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}") next_tokens = self.trie.next_tokens(self.current_seq) return token_id in next_tokens def update(self, token_id: int): if not isinstance(token_id, int): raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}") stepped = False completed = False reset = False if self.does_advance(token_id): self.current_seq.append(token_id) stepped = True else: reset = True self.reset() completed = self.trie.reached_leaf(self.current_seq) self.completed = completed return stepped, completed, reset def reset(self): self.completed = False self.current_seq = [] def remaining(self): if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq) def copy(self, stateful=False): new_constraint = DisjunctiveConstraint(self.token_ids) if stateful: new_constraint.seq_len = self.seqlen new_constraint.current_seq = self.current_seq new_constraint.completed = self.completed return new_constraint class ConstraintListState: r""" A class for beam scorers to track its progress through a list of constraints. Args: constraints (`List[Constraint]`): A list of [`Constraint`] objects that must be fulfilled by the beam scorer. """ def __init__(self, constraints: List[Constraint]): self.constraints = constraints # max # of steps required to fulfill a given constraint self.max_seqlen = max([c.seqlen for c in constraints]) self.n_constraints = len(constraints) self.completed = False self.init_state() def init_state(self): self.complete_constraints = [] self.inprogress_constraint = None self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints] def get_bank(self): add = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints) * self.max_seqlen) + add def advance(self): """The list of tokens to generate such that we can make progress. 
By "list" we don't mean the list of token that will fully fulfill a constraint. Given constraints `c_i = {t_ij | j == # of tokens}`, If we're not in the middle of progressing through a specific constraint `c_i`, we return: `[t_k1 for k in indices of unfulfilled constraints]` If we are in the middle of a constraint, then we return: `[t_ij]`, where `i` is the index of the inprogress constraint, `j` is the next step for the constraint. Though we don't care which constraint is fulfilled first, if we are in the progress of fulfilling a constraint, that's the only one we'll return. """ token_list = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" advance = constraint.advance() if isinstance(advance, int): token_list.append(advance) elif isinstance(advance, list): token_list.extend(advance) else: advance = self.inprogress_constraint.advance() if isinstance(advance, int): token_list.append(advance) elif isinstance(advance, list): token_list.extend(advance) if len(token_list) == 0: return None else: return token_list def reset(self, token_ids: Optional[List[int]]): """ token_ids: the tokens generated thus far to reset the state of the progress through constraints. """ self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint complete, stepped = self.add(token) # the entire list of constraints are fulfilled if self.completed: break def add(self, token_id: int): if not isinstance(token_id, int): raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.") complete, stepped = False, False if self.completed: complete = True stepped = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state stepped, complete, reset = self.inprogress_constraint.update(token_id) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False)) self.inprogress_constraint = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint) self.inprogress_constraint = None if len(self.pending_constraints) == 0: # we're done! self.completed = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints): if pending_constraint.does_advance(token_id): stepped, complete, reset = pending_constraint.update(token_id) if not stepped: raise Exception( "`constraint.update(token_id)` is not yielding incremental progress, " "even though `constraint.does_advance(token_id)` is true." ) if complete: self.complete_constraints.append(pending_constraint) self.inprogress_constraint = None if not complete and stepped: self.inprogress_constraint = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". 
                    self.pending_constraints = (
                        self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                    )

                    if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                        # If there's no longer any pending after this and no inprogress either, then we must be
                        # complete.

                        self.completed = True

                    break  # prevent accidentally stepping through multiple constraints with just one token.

        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we never actually modify the self.constraints objects
        # throughout this process, so they remain in their initialization state.

        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]

        return new_state
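To make the interplay of these classes concrete, here is a small, self-contained walk-through that drives a `PhrasalConstraint` through `ConstraintListState` by hand, the way a constrained beam scorer would. The token ids are arbitrary example values with no particular tokenizer attached.

```python
from transformers.generation.beam_constraints import ConstraintListState, PhrasalConstraint

state = ConstraintListState([PhrasalConstraint([5, 17, 29])])

for token_id in [3, 5, 17, 29]:  # 3 makes no progress; 5 -> 17 -> 29 fulfills the phrase in order
    allowed = state.advance()    # tokens that would advance the remaining constraints
    complete, stepped = state.add(token_id)
    print(f"token {token_id}: allowed={allowed}, stepped={stepped}, bank={state.get_bank()}")

print("all constraints fulfilled:", state.completed)  # True once the full phrase has been generated
```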
transformers/src/transformers/generation/beam_constraints.py/0
{ "file_path": "transformers/src/transformers/generation/beam_constraints.py", "repo_id": "transformers", "token_count": 8310 }
314
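To make the `advance`/`add` control flow in `beam_constraints.py` above easier to follow, here is a minimal hand-driven sketch. It assumes `PhrasalConstraint` from the same module and uses made-up token IDs purely for illustration; it is a sketch, not part of the file itself.

```python
# Hand-driving ConstraintListState with two phrasal constraints (illustrative token IDs, no tokenizer involved).
from transformers.generation.beam_constraints import ConstraintListState, PhrasalConstraint

constraints = [PhrasalConstraint([5, 9, 2]), PhrasalConstraint([7, 1])]
state = ConstraintListState(constraints)

print(state.advance())            # first tokens of every pending constraint, e.g. [5, 7]
complete, stepped = state.add(5)  # start fulfilling the first phrase -> (False, True)
print(state.advance())            # [9]: only the in-progress constraint's next step is offered

for token in (9, 2, 7, 1):        # finish both phrases
    state.add(token)
print(state.completed)            # True once every constraint is fulfilled
```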
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .integrations import ( is_optuna_available, is_ray_tune_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging logger = logging.get_logger(__name__) class HyperParamSearchBackendBase: name: str pip_package: str = None @staticmethod def is_available(): raise NotImplementedError def run(self, trainer, n_trials: int, direction: str, **kwargs): raise NotImplementedError def default_hp_space(self, trial): raise NotImplementedError def ensure_available(self): if not self.is_available(): raise RuntimeError( f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}." ) @classmethod def pip_install(cls): return f"`pip install {cls.pip_package or cls.name}`" class OptunaBackend(HyperParamSearchBackendBase): name = "optuna" @staticmethod def is_available(): return is_optuna_available() def run(self, trainer, n_trials: int, direction: str, **kwargs): return run_hp_search_optuna(trainer, n_trials, direction, **kwargs) def default_hp_space(self, trial): return default_hp_space_optuna(trial) class RayTuneBackend(HyperParamSearchBackendBase): name = "ray" pip_package = "'ray[tune]'" @staticmethod def is_available(): return is_ray_tune_available() def run(self, trainer, n_trials: int, direction: str, **kwargs): return run_hp_search_ray(trainer, n_trials, direction, **kwargs) def default_hp_space(self, trial): return default_hp_space_ray(trial) class SigOptBackend(HyperParamSearchBackendBase): name = "sigopt" @staticmethod def is_available(): return is_sigopt_available() def run(self, trainer, n_trials: int, direction: str, **kwargs): return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs) def default_hp_space(self, trial): return default_hp_space_sigopt(trial) class WandbBackend(HyperParamSearchBackendBase): name = "wandb" @staticmethod def is_available(): return is_wandb_available() def run(self, trainer, n_trials: int, direction: str, **kwargs): return run_hp_search_wandb(trainer, n_trials, direction, **kwargs) def default_hp_space(self, trial): return default_hp_space_wandb(trial) ALL_HYPERPARAMETER_SEARCH_BACKENDS = { HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend] } def default_hp_search_backend() -> str: available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(available_backends) > 0: name = available_backends[0].name if len(available_backends) > 1: logger.info( f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default." 
) return name raise RuntimeError( "No hyperparameter search backend available.\n" + "\n".join( f" - To install {backend.name} run {backend.pip_install()}" for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
transformers/src/transformers/hyperparameter_search.py/0
{ "file_path": "transformers/src/transformers/hyperparameter_search.py", "repo_id": "transformers", "token_count": 1646 }
315
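The backend registry in `hyperparameter_search.py` above can be probed directly; a hedged sketch follows. It only uses names defined in that file, and the commented `Trainer` wiring at the end is hypothetical, shown only to indicate where `run()` would plug in.

```python
# Probe which hyperparameter-search backends are installed and what the default would be.
from transformers.hyperparameter_search import ALL_HYPERPARAMETER_SEARCH_BACKENDS, default_hp_search_backend

for backend_cls in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values():
    status = "available" if backend_cls.is_available() else f"missing, install with {backend_cls.pip_install()}"
    print(f"{backend_cls.name}: {status}")

default_name = default_hp_search_backend()  # raises RuntimeError when no backend is installed
print(f"default backend: {default_name}")

# trainer = Trainer(..., model_init=model_init)   # hypothetical: an existing Trainer with a model_init
# backend = next(b for b in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if b.name == default_name)()
# best_run = backend.run(trainer, n_trials=10, direction="minimize")
```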
/*! ************************************************************************************************** * Deformable DETR * Copyright (c) 2020 SenseTime. All Rights Reserved. * Licensed under the Apache License, Version 2.0 [see LICENSE for details] ************************************************************************************************** * Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 ************************************************************************************************** */ #include <vector> #include "cuda/ms_deform_im2col_cuda.cuh" #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <cuda.h> #include <cuda_runtime.h> #pragma once #include <torch/extension.h> at::Tensor ms_deform_attn_cuda_forward( const at::Tensor &value, const at::Tensor &spatial_shapes, const at::Tensor &level_start_index, const at::Tensor &sampling_loc, const at::Tensor &attn_weight, const int im2col_step) { AT_ASSERTM(value.is_contiguous(), "value tensor has to be contiguous"); AT_ASSERTM(spatial_shapes.is_contiguous(), "spatial_shapes tensor has to be contiguous"); AT_ASSERTM(level_start_index.is_contiguous(), "level_start_index tensor has to be contiguous"); AT_ASSERTM(sampling_loc.is_contiguous(), "sampling_loc tensor has to be contiguous"); AT_ASSERTM(attn_weight.is_contiguous(), "attn_weight tensor has to be contiguous"); AT_ASSERTM(value.type().is_cuda(), "value must be a CUDA tensor"); AT_ASSERTM(spatial_shapes.type().is_cuda(), "spatial_shapes must be a CUDA tensor"); AT_ASSERTM(level_start_index.type().is_cuda(), "level_start_index must be a CUDA tensor"); AT_ASSERTM(sampling_loc.type().is_cuda(), "sampling_loc must be a CUDA tensor"); AT_ASSERTM(attn_weight.type().is_cuda(), "attn_weight must be a CUDA tensor"); const int batch = value.size(0); const int spatial_size = value.size(1); const int num_heads = value.size(2); const int channels = value.size(3); const int num_levels = spatial_shapes.size(0); const int num_query = sampling_loc.size(1); const int num_point = sampling_loc.size(4); const int im2col_step_ = std::min(batch, im2col_step); AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_); auto output = at::zeros({batch, num_query, num_heads, channels}, value.options()); const int batch_n = im2col_step_; auto output_n = output.view({batch/im2col_step_, batch_n, num_query, num_heads, channels}); auto per_value_size = spatial_size * num_heads * channels; auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2; auto per_attn_weight_size = num_query * num_heads * num_levels * num_point; for (int n = 0; n < batch/im2col_step_; ++n) { auto columns = output_n.select(0, n); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, value.type(), "ms_deform_attn_forward_cuda", ([&] { ms_deformable_im2col_cuda(at::cuda::getCurrentCUDAStream(), value.data<scalar_t>() + n * im2col_step_ * per_value_size, spatial_shapes.data<int64_t>(), level_start_index.data<int64_t>(), sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size, attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size, batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point, columns.data<scalar_t>()); })); } output = output.view({batch, num_query, num_heads*channels}); return output; } std::vector<at::Tensor> ms_deform_attn_cuda_backward( const at::Tensor &value, const at::Tensor &spatial_shapes, const at::Tensor 
&level_start_index, const at::Tensor &sampling_loc, const at::Tensor &attn_weight, const at::Tensor &grad_output, const int im2col_step) { AT_ASSERTM(value.is_contiguous(), "value tensor has to be contiguous"); AT_ASSERTM(spatial_shapes.is_contiguous(), "spatial_shapes tensor has to be contiguous"); AT_ASSERTM(level_start_index.is_contiguous(), "level_start_index tensor has to be contiguous"); AT_ASSERTM(sampling_loc.is_contiguous(), "sampling_loc tensor has to be contiguous"); AT_ASSERTM(attn_weight.is_contiguous(), "attn_weight tensor has to be contiguous"); AT_ASSERTM(grad_output.is_contiguous(), "grad_output tensor has to be contiguous"); AT_ASSERTM(value.type().is_cuda(), "value must be a CUDA tensor"); AT_ASSERTM(spatial_shapes.type().is_cuda(), "spatial_shapes must be a CUDA tensor"); AT_ASSERTM(level_start_index.type().is_cuda(), "level_start_index must be a CUDA tensor"); AT_ASSERTM(sampling_loc.type().is_cuda(), "sampling_loc must be a CUDA tensor"); AT_ASSERTM(attn_weight.type().is_cuda(), "attn_weight must be a CUDA tensor"); AT_ASSERTM(grad_output.type().is_cuda(), "grad_output must be a CUDA tensor"); const int batch = value.size(0); const int spatial_size = value.size(1); const int num_heads = value.size(2); const int channels = value.size(3); const int num_levels = spatial_shapes.size(0); const int num_query = sampling_loc.size(1); const int num_point = sampling_loc.size(4); const int im2col_step_ = std::min(batch, im2col_step); AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_); auto grad_value = at::zeros_like(value); auto grad_sampling_loc = at::zeros_like(sampling_loc); auto grad_attn_weight = at::zeros_like(attn_weight); const int batch_n = im2col_step_; auto per_value_size = spatial_size * num_heads * channels; auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2; auto per_attn_weight_size = num_query * num_heads * num_levels * num_point; auto grad_output_n = grad_output.view({batch/im2col_step_, batch_n, num_query, num_heads, channels}); for (int n = 0; n < batch/im2col_step_; ++n) { auto grad_output_g = grad_output_n.select(0, n); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, value.type(), "ms_deform_attn_backward_cuda", ([&] { ms_deformable_col2im_cuda(at::cuda::getCurrentCUDAStream(), grad_output_g.data<scalar_t>(), value.data<scalar_t>() + n * im2col_step_ * per_value_size, spatial_shapes.data<int64_t>(), level_start_index.data<int64_t>(), sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size, attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size, batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point, grad_value.data<scalar_t>() + n * im2col_step_ * per_value_size, grad_sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size, grad_attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size); })); } return { grad_value, grad_sampling_loc, grad_attn_weight }; }
transformers/src/transformers/kernels/deformable_detr/cuda/ms_deform_attn_cuda.cu/0
{ "file_path": "transformers/src/transformers/kernels/deformable_detr/cuda/ms_deform_attn_cuda.cu", "repo_id": "transformers", "token_count": 3216 }
316
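The CUDA entry points above are easiest to read alongside the tensor shapes they expect. The sketch below builds dummy inputs with consistent shapes; the commented call assumes the extension has been compiled and exposed under a binding such as `MultiScaleDeformableAttention`, which is an assumption rather than something defined in this file.

```python
# Dummy inputs matching the shapes read inside ms_deform_attn_cuda_forward.
import torch

batch, num_heads, channels = 2, 8, 32
num_levels, num_query, num_point = 4, 300, 4

spatial_shapes = torch.as_tensor([[100, 100], [50, 50], [25, 25], [13, 13]], dtype=torch.long, device="cuda")
spatial_size = int(spatial_shapes.prod(1).sum())  # 13294 = sum of H*W over the feature levels
level_start_index = torch.cat((spatial_shapes.new_zeros(1), spatial_shapes.prod(1).cumsum(0)[:-1]))

value = torch.rand(batch, spatial_size, num_heads, channels, device="cuda")
sampling_locations = torch.rand(batch, num_query, num_heads, num_levels, num_point, 2, device="cuda")
attention_weights = torch.rand(batch, num_query, num_heads, num_levels, num_point, device="cuda")

# output = MultiScaleDeformableAttention.ms_deform_attn_forward(          # hypothetical binding name
#     value, spatial_shapes, level_start_index, sampling_locations, attention_weights, 64
# )
# output.shape would be (batch, num_query, num_heads * channels)
```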
#include <torch/extension.h> #include <ATen/ATen.h> #include "cuda_launch.h" #include "cuda_kernel.h" #include <vector> ////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////// std::vector<at::Tensor> index_max_kernel( at::Tensor index_vals, // [batch_size, 32, num_block] at::Tensor indices, // [batch_size, num_block], int A_num_block, int B_num_block ) { int batch_size = indices.size(0); int num_block = indices.size(1); at::Tensor max_vals = at::zeros({batch_size, A_num_block * 32}, index_vals.options()); at::Tensor max_vals_scatter = at::zeros({batch_size, 32, num_block}, index_vals.options()); dim3 threads(256); dim3 blocks(batch_size); int shared_mem = A_num_block * 32 * sizeof(float); index_max_cuda_kernel<<<blocks, threads, shared_mem>>>( index_vals.data_ptr<float>(), indices.data_ptr<int>(), max_vals.data_ptr<float>(), max_vals_scatter.data_ptr<float>(), batch_size, A_num_block, B_num_block, num_block ); return {max_vals, max_vals_scatter}; } at::Tensor mm_to_sparse_kernel( at::Tensor dense_A, // [batch_size, A_num_block, dim, 32] at::Tensor dense_B, // [batch_size, B_num_block, dim, 32] at::Tensor indices // [batch_size, num_block] ) { int batch_size = dense_A.size(0); int A_num_block = dense_A.size(1); int B_num_block = dense_B.size(1); int dim = dense_A.size(2); int num_block = indices.size(1); at::Tensor sparse_C = at::zeros({batch_size, num_block, 32, 32}, dense_A.options()); dim3 threads(64, 4); dim3 blocks(num_block / 4, batch_size); mm_to_sparse_cuda_kernel<<<blocks, threads>>>( dense_A.data_ptr<float>(), dense_B.data_ptr<float>(), indices.data_ptr<int>(), sparse_C.data_ptr<float>(), batch_size, A_num_block, B_num_block, dim, num_block ); return sparse_C; } at::Tensor sparse_dense_mm_kernel( at::Tensor sparse_A, // [batch_size, num_block, 32, 32] at::Tensor indices, // [batch_size, num_block] at::Tensor dense_B, // [batch_size, B_num_block, dim, 32] int A_num_block ) { int batch_size = sparse_A.size(0); int num_block = sparse_A.size(1); int B_num_block = dense_B.size(1); int dim = dense_B.size(2); at::Tensor dense_C = at::zeros({batch_size, A_num_block, dim, 32}, dense_B.options()); dim3 threads(128, 2); dim3 blocks(num_block / 2, batch_size); sparse_dense_mm_cuda_kernel<<<blocks, threads>>>( sparse_A.data_ptr<float>(), indices.data_ptr<int>(), dense_B.data_ptr<float>(), dense_C.data_ptr<float>(), batch_size, A_num_block, B_num_block, dim, num_block ); return dense_C; } at::Tensor reduce_sum_kernel( at::Tensor sparse_A, // [batch_size, num_block, 32, 32] at::Tensor indices, // [batch_size, num_block] int A_num_block, int B_num_block ) { int batch_size = sparse_A.size(0); int num_block = sparse_A.size(1); at::Tensor dense_C = at::zeros({batch_size, A_num_block, 32}, sparse_A.options()); dim3 threads(32, 4); dim3 blocks(num_block / 4, batch_size); reduce_sum_cuda_kernel<<<blocks, threads>>>( sparse_A.data_ptr<float>(), indices.data_ptr<int>(), dense_C.data_ptr<float>(), batch_size, A_num_block, B_num_block, num_block ); return dense_C; } at::Tensor scatter_kernel( at::Tensor dense_A, // [batch_size, A_num_block, 32] at::Tensor indices, // [batch_size, num_block] int B_num_block ) { int batch_size = dense_A.size(0); int A_num_block = dense_A.size(1); int num_block = indices.size(1); at::Tensor sparse_C = at::zeros({batch_size, num_block, 32, 32}, dense_A.options()); dim3 threads(32, 4); dim3 blocks(num_block / 4, batch_size); 
scatter_cuda_kernel<<<blocks, threads>>>( dense_A.data_ptr<float>(), indices.data_ptr<int>(), sparse_C.data_ptr<float>(), batch_size, A_num_block, B_num_block, num_block ); return sparse_C; }
transformers/src/transformers/kernels/mra/cuda_launch.cu/0
{ "file_path": "transformers/src/transformers/kernels/mra/cuda_launch.cu", "repo_id": "transformers", "token_count": 1668 }
317
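For the MRA launchers above, the comments in the signatures already encode the expected layouts; the sketch below just materializes them as tensors. The `mra_cuda` module name in the commented calls is hypothetical, and the integer `indices` values are illustrative placeholders for block-pair selections.

```python
# Block-sparse tensor layouts consumed by the MRA kernels (illustrative sizes).
import torch

batch_size, A_num_block, B_num_block, dim, num_block = 2, 8, 8, 64, 16  # num_block kept divisible by 4

dense_A = torch.rand(batch_size, A_num_block, dim, 32, device="cuda")   # [batch_size, A_num_block, dim, 32]
dense_B = torch.rand(batch_size, B_num_block, dim, 32, device="cuda")   # [batch_size, B_num_block, dim, 32]
indices = torch.randint(0, A_num_block * B_num_block, (batch_size, num_block), dtype=torch.int32, device="cuda")

# sparse_C = mra_cuda.mm_to_sparse(dense_A, dense_B, indices)                   # -> [batch_size, num_block, 32, 32]
# dense_C = mra_cuda.sparse_dense_mm(sparse_C, indices, dense_B, A_num_block)   # -> [batch_size, A_num_block, dim, 32]
```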
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Dict, Optional, Tuple import flax import jax.numpy as jnp from .utils import ModelOutput @flax.struct.dataclass class FlaxBaseModelOutput(ModelOutput): """ Base class for model's outputs, with potential hidden states and attentions. Args: last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ last_hidden_state: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None @flax.struct.dataclass class FlaxBaseModelOutputWithNoAttention(ModelOutput): """ Base class for model's outputs, with potential hidden states. Args: last_hidden_state (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, num_channels, height, width)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. """ last_hidden_state: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None @flax.struct.dataclass class FlaxBaseModelOutputWithPoolingAndNoAttention(ModelOutput): """ Base class for model's outputs that also contains a pooling of the last hidden states. Args: last_hidden_state (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (`jnp.ndarray` of shape `(batch_size, hidden_size)`): Last layer hidden-state after a pooling operation on the spatial dimensions. 
hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, num_channels, height, width)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. """ last_hidden_state: jnp.ndarray = None pooler_output: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None @flax.struct.dataclass class FlaxImageClassifierOutputWithNoAttention(ModelOutput): """ Base class for outputs of image classification models. Args: logits (`jnp.ndarray` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each stage) of shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the model at the output of each stage. """ logits: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None @flax.struct.dataclass class FlaxBaseModelOutputWithPast(ModelOutput): """ Base class for model's outputs, with potential hidden states and attentions. Args: last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. past_key_values (`Dict[str, jnp.ndarray]`): Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*. hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ last_hidden_state: jnp.ndarray = None past_key_values: Optional[Dict[str, jnp.ndarray]] = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None @flax.struct.dataclass class FlaxBaseModelOutputWithPooling(ModelOutput): """ Base class for model's outputs that also contains a pooling of the last hidden states. Args: last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (`jnp.ndarray` of shape `(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. 
The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining. hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ last_hidden_state: jnp.ndarray = None pooler_output: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None @flax.struct.dataclass class FlaxBaseModelOutputWithPoolingAndCrossAttentions(ModelOutput): """ Base class for model's outputs that also contains a pooling of the last hidden states. Args: last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (`jnp.ndarray` of shape `(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining. hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. 
past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. """ last_hidden_state: jnp.ndarray = None pooler_output: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None attentions: Optional[Tuple[jnp.ndarray]] = None cross_attentions: Optional[Tuple[jnp.ndarray]] = None @flax.struct.dataclass class FlaxBaseModelOutputWithPastAndCrossAttentions(ModelOutput): """ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding). Args: last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output. past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. 
""" last_hidden_state: jnp.ndarray = None past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None cross_attentions: Optional[Tuple[jnp.ndarray]] = None @flax.struct.dataclass class FlaxSeq2SeqModelOutput(ModelOutput): """ Base class for model encoder's outputs that also contains : pre-computed hidden states that can speed up sequential decoding. Args: last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the decoder of the model. If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output. past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" last_hidden_state: jnp.ndarray = None past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None decoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None decoder_attentions: Optional[Tuple[jnp.ndarray]] = None cross_attentions: Optional[Tuple[jnp.ndarray]] = None encoder_last_hidden_state: Optional[jnp.ndarray] = None encoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None encoder_attentions: Optional[Tuple[jnp.ndarray]] = None @flax.struct.dataclass class FlaxCausalLMOutputWithCrossAttentions(ModelOutput): """ Base class for causal language model (or autoregressive) outputs. Args: logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads. past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `jnp.ndarray` tuples of length `config.n_layers`, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. Only relevant if `config.is_decoder = True`. Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. """ logits: jnp.ndarray = None past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None cross_attentions: Optional[Tuple[jnp.ndarray]] = None @flax.struct.dataclass class FlaxMaskedLMOutput(ModelOutput): """ Base class for masked language models outputs. Args: logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ logits: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None FlaxCausalLMOutput = FlaxMaskedLMOutput @flax.struct.dataclass class FlaxSeq2SeqLMOutput(ModelOutput): """ Base class for sequence-to-sequence language models outputs. Args: logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. 
Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. """ logits: jnp.ndarray = None past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None decoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None decoder_attentions: Optional[Tuple[jnp.ndarray]] = None cross_attentions: Optional[Tuple[jnp.ndarray]] = None encoder_last_hidden_state: Optional[jnp.ndarray] = None encoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None encoder_attentions: Optional[Tuple[jnp.ndarray]] = None @flax.struct.dataclass class FlaxNextSentencePredictorOutput(ModelOutput): """ Base class for outputs of models predicting if two sentences are consecutive or not. Args: logits (`jnp.ndarray` of shape `(batch_size, 2)`): Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax). hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ logits: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None @flax.struct.dataclass class FlaxSequenceClassifierOutput(ModelOutput): """ Base class for outputs of sentence classification models. Args: logits (`jnp.ndarray` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ logits: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None @flax.struct.dataclass class FlaxSeq2SeqSequenceClassifierOutput(ModelOutput): """ Base class for outputs of sequence-to-sequence sentence classification models. Args: logits (`jnp.ndarray` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). 
past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. """ logits: jnp.ndarray = None past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None decoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None decoder_attentions: Optional[Tuple[jnp.ndarray]] = None cross_attentions: Optional[Tuple[jnp.ndarray]] = None encoder_last_hidden_state: Optional[jnp.ndarray] = None encoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None encoder_attentions: Optional[Tuple[jnp.ndarray]] = None @flax.struct.dataclass class FlaxMultipleChoiceModelOutput(ModelOutput): """ Base class for outputs of multiple choice models. Args: logits (`jnp.ndarray` of shape `(batch_size, num_choices)`): *num_choices* is the second dimension of the input tensors. (see *input_ids* above). Classification scores (before SoftMax). 
hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ logits: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None @flax.struct.dataclass class FlaxTokenClassifierOutput(ModelOutput): """ Base class for outputs of token classification models. Args: logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.num_labels)`): Classification scores (before SoftMax). hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ logits: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None @flax.struct.dataclass class FlaxQuestionAnsweringModelOutput(ModelOutput): """ Base class for outputs of question answering models. Args: start_logits (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Span-start scores (before SoftMax). end_logits (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Span-end scores (before SoftMax). hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ start_logits: jnp.ndarray = None end_logits: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None @flax.struct.dataclass class FlaxSeq2SeqQuestionAnsweringModelOutput(ModelOutput): """ Base class for outputs of sequence-to-sequence question answering models. 
Args: start_logits (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Span-start scores (before SoftMax). end_logits (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Span-end scores (before SoftMax). past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. """ start_logits: jnp.ndarray = None end_logits: jnp.ndarray = None past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None decoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None decoder_attentions: Optional[Tuple[jnp.ndarray]] = None cross_attentions: Optional[Tuple[jnp.ndarray]] = None encoder_last_hidden_state: Optional[jnp.ndarray] = None encoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None encoder_attentions: Optional[Tuple[jnp.ndarray]] = None
transformers/src/transformers/modeling_flax_outputs.py/0
{ "file_path": "transformers/src/transformers/modeling_flax_outputs.py", "repo_id": "transformers", "token_count": 15429 }
318
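Since the Flax output classes above are plain `flax.struct` dataclasses built on `ModelOutput`, they can be constructed and inspected directly; a small sketch with illustrative shapes, not tied to any particular model:

```python
# Constructing and updating a Flax model-output dataclass by hand.
import jax.numpy as jnp
from transformers.modeling_flax_outputs import FlaxBaseModelOutput

hidden = jnp.zeros((1, 5, 8))                       # (batch, seq_len, hidden_size)
outputs = FlaxBaseModelOutput(last_hidden_state=hidden)

print(outputs.last_hidden_state.shape)              # (1, 5, 8)
outputs = outputs.replace(hidden_states=(hidden,))  # flax.struct dataclasses are immutable; use replace()
print(len(outputs.hidden_states))                   # 1
```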
# coding=utf-8 # Copyright 2018 Google AI, Google Brain and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tokenization classes for ALBERT model.""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_albert import AlbertTokenizer else: AlbertTokenizer = None logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "albert/albert-base-v1": "https://huggingface.co/albert/albert-base-v1/resolve/main/spiece.model", "albert/albert-large-v1": "https://huggingface.co/albert/albert-large-v1/resolve/main/spiece.model", "albert/albert-xlarge-v1": "https://huggingface.co/albert/albert-xlarge-v1/resolve/main/spiece.model", "albert/albert-xxlarge-v1": "https://huggingface.co/albert/albert-xxlarge-v1/resolve/main/spiece.model", "albert/albert-base-v2": "https://huggingface.co/albert/albert-base-v2/resolve/main/spiece.model", "albert/albert-large-v2": "https://huggingface.co/albert/albert-large-v2/resolve/main/spiece.model", "albert/albert-xlarge-v2": "https://huggingface.co/albert/albert-xlarge-v2/resolve/main/spiece.model", "albert/albert-xxlarge-v2": "https://huggingface.co/albert/albert-xxlarge-v2/resolve/main/spiece.model", }, "tokenizer_file": { "albert/albert-base-v1": "https://huggingface.co/albert/albert-base-v1/resolve/main/tokenizer.json", "albert/albert-large-v1": "https://huggingface.co/albert/albert-large-v1/resolve/main/tokenizer.json", "albert/albert-xlarge-v1": "https://huggingface.co/albert/albert-xlarge-v1/resolve/main/tokenizer.json", "albert/albert-xxlarge-v1": "https://huggingface.co/albert/albert-xxlarge-v1/resolve/main/tokenizer.json", "albert/albert-base-v2": "https://huggingface.co/albert/albert-base-v2/resolve/main/tokenizer.json", "albert/albert-large-v2": "https://huggingface.co/albert/albert-large-v2/resolve/main/tokenizer.json", "albert/albert-xlarge-v2": "https://huggingface.co/albert/albert-xlarge-v2/resolve/main/tokenizer.json", "albert/albert-xxlarge-v2": "https://huggingface.co/albert/albert-xxlarge-v2/resolve/main/tokenizer.json", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "albert/albert-base-v1": 512, "albert/albert-large-v1": 512, "albert/albert-xlarge-v1": 512, "albert/albert-xxlarge-v1": 512, "albert/albert-base-v2": 512, "albert/albert-large-v2": 512, "albert/albert-xlarge-v2": 512, "albert/albert-xxlarge-v2": 512, } SPIECE_UNDERLINE = "▁" class AlbertTokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" ALBERT tokenizer (backed by HuggingFace's *tokenizers* library). Based on [Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models). This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods Args: vocab_file (`str`): [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that contains the vocabulary necessary to instantiate a tokenizer. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. remove_space (`bool`, *optional*, defaults to `True`): Whether or not to strip the text when tokenizing (removing excess spaces before and after the string). keep_accents (`bool`, *optional*, defaults to `False`): Whether or not to keep accents when tokenizing. bos_token (`str`, *optional*, defaults to `"[CLS]"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"[SEP]"`): The end of sequence token. .. note:: When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES slow_tokenizer_class = AlbertTokenizer def __init__( self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs, ): # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
mask_token = ( AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False) if isinstance(mask_token, str) else mask_token ) super().__init__( vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, ) self.do_lower_case = do_lower_case self.remove_space = remove_space self.keep_accents = keep_accents self.vocab_file = vocab_file @property def can_save_slow_tokenizer(self) -> bool: return os.path.isfile(self.vocab_file) if self.vocab_file else False def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An ALBERT sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return cls + token_ids_0 + sep return cls + token_ids_0 + sep + token_ids_1 + sep def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` if token_ids_1 is None, only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of ids. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,)
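# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the upstream module): it demonstrates
# the special-token layout produced by `build_inputs_with_special_tokens` and
# `create_token_type_ids_from_sequences` above. The checkpoint name is an
# assumption; any ALBERT checkpoint that ships a `tokenizer.json` would work.
if __name__ == "__main__":
    tokenizer = AlbertTokenizerFast.from_pretrained("albert/albert-base-v2")

    # A pair of sequences is laid out as: [CLS] A [SEP] B [SEP]
    ids = tokenizer.build_inputs_with_special_tokens([10, 11], [20, 21])
    assert ids[0] == tokenizer.cls_token_id and ids[-1] == tokenizer.sep_token_id

    # Token type ids: 0s cover "[CLS] A [SEP]", 1s cover "B [SEP]"
    assert tokenizer.create_token_type_ids_from_sequences([10, 11], [20, 21]) == [0, 0, 0, 0, 1, 1, 1]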
transformers/src/transformers/models/albert/tokenization_albert_fast.py/0
{ "file_path": "transformers/src/transformers/models/albert/tokenization_albert_fast.py", "repo_id": "transformers", "token_count": 4690 }
319
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Factory function to build auto-model classes.""" import copy import importlib import json import os import warnings from collections import OrderedDict from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...utils import ( CONFIG_NAME, cached_file, copy_func, extract_commit_hash, find_adapter_config_file, is_peft_available, logging, requires_backends, ) from .configuration_auto import AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings logger = logging.get_logger(__name__) CLASS_DOCSTRING = """ This is a generic model class that will be instantiated as one of the model classes of the library when created with the [`~BaseAutoModelClass.from_pretrained`] class method or the [`~BaseAutoModelClass.from_config`] class method. This class cannot be instantiated directly using `__init__()` (throws an error). """ FROM_CONFIG_DOCSTRING = """ Instantiates one of the model classes of the library from a configuration. Note: Loading a model from its configuration file does **not** load the model weights. It only affects the model's configuration. Use [`~BaseAutoModelClass.from_pretrained`] to load the model weights. Args: config ([`PretrainedConfig`]): The model class to instantiate is selected based on the configuration class: List options attn_implementation (`str`, *optional*): The attention implementation to use in the model (if relevant). Can be any of `"eager"` (manual implementation of the attention), `"sdpa"` (using [`F.scaled_dot_product_attention`](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention.html)), or `"flash_attention_2"` (using [Dao-AILab/flash-attention](https://github.com/Dao-AILab/flash-attention)). By default, if available, SDPA will be used for torch>=2.1.1. The default is otherwise the manual `"eager"` implementation. Examples: ```python >>> from transformers import AutoConfig, BaseAutoModelClass >>> # Download configuration from huggingface.co and cache. >>> config = AutoConfig.from_pretrained("checkpoint_placeholder") >>> model = BaseAutoModelClass.from_config(config) ``` """ FROM_PRETRAINED_TORCH_DOCSTRING = """ Instantiate one of the model classes of the library from a pretrained model. The model class to instantiate is selected based on the `model_type` property of the config object (either passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's missing, by falling back to using pattern matching on `pretrained_model_name_or_path`: List options The model is set in evaluation mode by default using `model.eval()` (so for instance, dropout modules are deactivated). 
    To train the model, you should first set it back in training mode with `model.train()`.

    Args:
        pretrained_model_name_or_path (`str` or `os.PathLike`):
            Can be either:

                - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
                - A path to a *directory* containing model weights saved using
                  [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
                - A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In
                  this case, `from_tf` should be set to `True` and a configuration object should be provided as
                  `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a
                  PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
        model_args (additional positional arguments, *optional*):
            Will be passed along to the underlying model `__init__()` method.
        config ([`PretrainedConfig`], *optional*):
            Configuration for the model to use instead of an automatically loaded configuration. Configuration can
            be automatically loaded when:

                - The model is a model provided by the library (loaded with the *model id* string of a pretrained
                  model).
                - The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the
                  save directory.
                - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a
                  configuration JSON file named *config.json* is found in the directory.
        state_dict (*Dict[str, torch.Tensor]*, *optional*):
            A state dictionary to use instead of a state dictionary loaded from saved weights file. This option can
            be used if you want to create a model from a pretrained configuration but load your own weights. In
            this case though, you should check if using [`~PreTrainedModel.save_pretrained`] and
            [`~PreTrainedModel.from_pretrained`] is not a simpler option.
        cache_dir (`str` or `os.PathLike`, *optional*):
            Path to a directory in which a downloaded pretrained model configuration should be cached if the
            standard cache should not be used.
        from_tf (`bool`, *optional*, defaults to `False`):
            Load the model weights from a TensorFlow checkpoint save file (see docstring of
            `pretrained_model_name_or_path` argument).
        force_download (`bool`, *optional*, defaults to `False`):
            Whether or not to force the (re-)download of the model weights and configuration files, overriding the
            cached versions if they exist.
        resume_download (`bool`, *optional*, defaults to `False`):
            Whether or not to delete incompletely received files. Will attempt to resume the download if such a
            file exists.
        proxies (`Dict[str, str]`, *optional*):
            A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
            'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
        output_loading_info (`bool`, *optional*, defaults to `False`):
            Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
        local_files_only (`bool`, *optional*, defaults to `False`):
            Whether or not to only look at local files (e.g., not try downloading the model).
        revision (`str`, *optional*, defaults to `"main"`):
            The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
            git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
            identifier allowed by git.
        trust_remote_code (`bool`, *optional*, defaults to `False`):
            Whether or not to allow for custom models defined on the Hub in their own modeling files.
This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine. code_revision (`str`, *optional*, defaults to `"main"`): The specific revision to use for the code on the Hub, if the code leaves in a different repository than the rest of the model. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. kwargs (additional keyword arguments, *optional*): Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., `output_attentions=True`). Behaves differently depending on whether a `config` is provided or automatically loaded: - If a configuration is provided with `config`, `**kwargs` will be directly passed to the underlying model's `__init__` method (we assume all relevant updates to the configuration have already been done) - If a configuration is not provided, `kwargs` will be first passed to the configuration class initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that corresponds to a configuration attribute will be used to override said attribute with the supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's `__init__` function. Examples: ```python >>> from transformers import AutoConfig, BaseAutoModelClass >>> # Download model and configuration from huggingface.co and cache. >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder") >>> # Update configuration during loading >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder", output_attentions=True) >>> model.config.output_attentions True >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower) >>> config = AutoConfig.from_pretrained("./tf_model/shortcut_placeholder_tf_model_config.json") >>> model = BaseAutoModelClass.from_pretrained( ... "./tf_model/shortcut_placeholder_tf_checkpoint.ckpt.index", from_tf=True, config=config ... ) ``` """ FROM_PRETRAINED_TF_DOCSTRING = """ Instantiate one of the model classes of the library from a pretrained model. The model class to instantiate is selected based on the `model_type` property of the config object (either passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's missing, by falling back to using pattern matching on `pretrained_model_name_or_path`: List options Args: pretrained_model_name_or_path (`str` or `os.PathLike`): Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. - A path to a *directory* containing model weights saved using [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. - A path or url to a *PyTorch state_dict save file* (e.g, `./pt_model/pytorch_model.bin`). In this case, `from_pt` should be set to `True` and a configuration object should be provided as `config` argument. This loading path is slower than converting the PyTorch model in a TensorFlow model using the provided conversion scripts and loading the TensorFlow model afterwards. model_args (additional positional arguments, *optional*): Will be passed along to the underlying model `__init__()` method. config ([`PretrainedConfig`], *optional*): Configuration for the model to use instead of an automatically loaded configuration. 
            Configuration can be automatically loaded when:

                - The model is a model provided by the library (loaded with the *model id* string of a pretrained
                  model).
                - The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the
                  save directory.
                - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a
                  configuration JSON file named *config.json* is found in the directory.
        cache_dir (`str` or `os.PathLike`, *optional*):
            Path to a directory in which a downloaded pretrained model configuration should be cached if the
            standard cache should not be used.
        from_pt (`bool`, *optional*, defaults to `False`):
            Load the model weights from a PyTorch checkpoint save file (see docstring of
            `pretrained_model_name_or_path` argument).
        force_download (`bool`, *optional*, defaults to `False`):
            Whether or not to force the (re-)download of the model weights and configuration files, overriding the
            cached versions if they exist.
        resume_download (`bool`, *optional*, defaults to `False`):
            Whether or not to delete incompletely received files. Will attempt to resume the download if such a
            file exists.
        proxies (`Dict[str, str]`, *optional*):
            A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
            'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
        output_loading_info (`bool`, *optional*, defaults to `False`):
            Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
        local_files_only (`bool`, *optional*, defaults to `False`):
            Whether or not to only look at local files (e.g., not try downloading the model).
        revision (`str`, *optional*, defaults to `"main"`):
            The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
            git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
            identifier allowed by git.
        trust_remote_code (`bool`, *optional*, defaults to `False`):
            Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
            should only be set to `True` for repositories you trust and in which you have read the code, as it will
            execute code present on the Hub on your local machine.
        code_revision (`str`, *optional*, defaults to `"main"`):
            The specific revision to use for the code on the Hub, if the code lives in a different repository than
            the rest of the model. It can be a branch name, a tag name, or a commit id, since we use a git-based
            system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier
            allowed by git.
        kwargs (additional keyword arguments, *optional*):
            Can be used to update the configuration object (after it has been loaded) and initiate the model (e.g.,
            `output_attentions=True`). Behaves differently depending on whether a `config` is provided or
            automatically loaded:

                - If a configuration is provided with `config`, `**kwargs` will be directly passed to the
                  underlying model's `__init__` method (we assume all relevant updates to the configuration have
                  already been done)
                - If a configuration is not provided, `kwargs` will be first passed to the configuration class
                  initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that
                  corresponds to a configuration attribute will be used to override said attribute with the
                  supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute
                  will be passed to the underlying model's `__init__` function.
Examples: ```python >>> from transformers import AutoConfig, BaseAutoModelClass >>> # Download model and configuration from huggingface.co and cache. >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder") >>> # Update configuration during loading >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder", output_attentions=True) >>> model.config.output_attentions True >>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower) >>> config = AutoConfig.from_pretrained("./pt_model/shortcut_placeholder_pt_model_config.json") >>> model = BaseAutoModelClass.from_pretrained( ... "./pt_model/shortcut_placeholder_pytorch_model.bin", from_pt=True, config=config ... ) ``` """ FROM_PRETRAINED_FLAX_DOCSTRING = """ Instantiate one of the model classes of the library from a pretrained model. The model class to instantiate is selected based on the `model_type` property of the config object (either passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's missing, by falling back to using pattern matching on `pretrained_model_name_or_path`: List options Args: pretrained_model_name_or_path (`str` or `os.PathLike`): Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. - A path to a *directory* containing model weights saved using [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. - A path or url to a *PyTorch state_dict save file* (e.g, `./pt_model/pytorch_model.bin`). In this case, `from_pt` should be set to `True` and a configuration object should be provided as `config` argument. This loading path is slower than converting the PyTorch model in a TensorFlow model using the provided conversion scripts and loading the TensorFlow model afterwards. model_args (additional positional arguments, *optional*): Will be passed along to the underlying model `__init__()` method. config ([`PretrainedConfig`], *optional*): Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when: - The model is a model provided by the library (loaded with the *model id* string of a pretrained model). - The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the save directory. - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a configuration JSON file named *config.json* is found in the directory. cache_dir (`str` or `os.PathLike`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. from_pt (`bool`, *optional*, defaults to `False`): Load the model weights from a PyTorch checkpoint save file (see docstring of `pretrained_model_name_or_path` argument). force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. resume_download (`bool`, *optional*, defaults to `False`): Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. 
output_loading_info(`bool`, *optional*, defaults to `False`): Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages. local_files_only(`bool`, *optional*, defaults to `False`): Whether or not to only look at local files (e.g., not try downloading the model). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. trust_remote_code (`bool`, *optional*, defaults to `False`): Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine. code_revision (`str`, *optional*, defaults to `"main"`): The specific revision to use for the code on the Hub, if the code leaves in a different repository than the rest of the model. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. kwargs (additional keyword arguments, *optional*): Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., `output_attentions=True`). Behaves differently depending on whether a `config` is provided or automatically loaded: - If a configuration is provided with `config`, `**kwargs` will be directly passed to the underlying model's `__init__` method (we assume all relevant updates to the configuration have already been done) - If a configuration is not provided, `kwargs` will be first passed to the configuration class initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that corresponds to a configuration attribute will be used to override said attribute with the supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's `__init__` function. Examples: ```python >>> from transformers import AutoConfig, BaseAutoModelClass >>> # Download model and configuration from huggingface.co and cache. >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder") >>> # Update configuration during loading >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder", output_attentions=True) >>> model.config.output_attentions True >>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower) >>> config = AutoConfig.from_pretrained("./pt_model/shortcut_placeholder_pt_model_config.json") >>> model = BaseAutoModelClass.from_pretrained( ... "./pt_model/shortcut_placeholder_pytorch_model.bin", from_pt=True, config=config ... 
) ``` """ def _get_model_class(config, model_mapping): supported_models = model_mapping[type(config)] if not isinstance(supported_models, (list, tuple)): return supported_models name_to_model = {model.__name__: model for model in supported_models} architectures = getattr(config, "architectures", []) for arch in architectures: if arch in name_to_model: return name_to_model[arch] elif f"TF{arch}" in name_to_model: return name_to_model[f"TF{arch}"] elif f"Flax{arch}" in name_to_model: return name_to_model[f"Flax{arch}"] # If not architecture is set in the config or match the supported models, the first element of the tuple is the # defaults. return supported_models[0] class _BaseAutoModelClass: # Base class for auto models. _model_mapping = None def __init__(self, *args, **kwargs): raise EnvironmentError( f"{self.__class__.__name__} is designed to be instantiated " f"using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or " f"`{self.__class__.__name__}.from_config(config)` methods." ) @classmethod def from_config(cls, config, **kwargs): trust_remote_code = kwargs.pop("trust_remote_code", None) has_remote_code = hasattr(config, "auto_map") and cls.__name__ in config.auto_map has_local_code = type(config) in cls._model_mapping.keys() trust_remote_code = resolve_trust_remote_code( trust_remote_code, config._name_or_path, has_local_code, has_remote_code ) if has_remote_code and trust_remote_code: class_ref = config.auto_map[cls.__name__] if "--" in class_ref: repo_id, class_ref = class_ref.split("--") else: repo_id = config.name_or_path model_class = get_class_from_dynamic_module(class_ref, repo_id, **kwargs) if os.path.isdir(config._name_or_path): model_class.register_for_auto_class(cls.__name__) else: cls.register(config.__class__, model_class, exist_ok=True) _ = kwargs.pop("code_revision", None) return model_class._from_config(config, **kwargs) elif type(config) in cls._model_mapping.keys(): model_class = _get_model_class(config, cls._model_mapping) return model_class._from_config(config, **kwargs) raise ValueError( f"Unrecognized configuration class {config.__class__} for this kind of AutoModel: {cls.__name__}.\n" f"Model type should be one of {', '.join(c.__name__ for c in cls._model_mapping.keys())}." ) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): config = kwargs.pop("config", None) trust_remote_code = kwargs.pop("trust_remote_code", None) kwargs["_from_auto"] = True hub_kwargs_names = [ "cache_dir", "force_download", "local_files_only", "proxies", "resume_download", "revision", "subfolder", "use_auth_token", "token", ] hub_kwargs = {name: kwargs.pop(name) for name in hub_kwargs_names if name in kwargs} code_revision = kwargs.pop("code_revision", None) commit_hash = kwargs.pop("_commit_hash", None) adapter_kwargs = kwargs.pop("adapter_kwargs", None) token = hub_kwargs.pop("token", None) use_auth_token = hub_kwargs.pop("use_auth_token", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if token is not None: raise ValueError( "`token` and `use_auth_token` are both specified. Please set only the argument `token`." 
) token = use_auth_token if token is not None: hub_kwargs["token"] = token if commit_hash is None: if not isinstance(config, PretrainedConfig): # We make a call to the config file first (which may be absent) to get the commit hash as soon as possible resolved_config_file = cached_file( pretrained_model_name_or_path, CONFIG_NAME, _raise_exceptions_for_gated_repo=False, _raise_exceptions_for_missing_entries=False, _raise_exceptions_for_connection_errors=False, **hub_kwargs, ) commit_hash = extract_commit_hash(resolved_config_file, commit_hash) else: commit_hash = getattr(config, "_commit_hash", None) if is_peft_available(): if adapter_kwargs is None: adapter_kwargs = {} if token is not None: adapter_kwargs["token"] = token maybe_adapter_path = find_adapter_config_file( pretrained_model_name_or_path, _commit_hash=commit_hash, **adapter_kwargs ) if maybe_adapter_path is not None: with open(maybe_adapter_path, "r", encoding="utf-8") as f: adapter_config = json.load(f) adapter_kwargs["_adapter_model_path"] = pretrained_model_name_or_path pretrained_model_name_or_path = adapter_config["base_model_name_or_path"] if not isinstance(config, PretrainedConfig): kwargs_orig = copy.deepcopy(kwargs) # ensure not to pollute the config object with torch_dtype="auto" - since it's # meaningless in the context of the config object - torch.dtype values are acceptable if kwargs.get("torch_dtype", None) == "auto": _ = kwargs.pop("torch_dtype") # to not overwrite the quantization_config if config has a quantization_config if kwargs.get("quantization_config", None) is not None: _ = kwargs.pop("quantization_config") config, kwargs = AutoConfig.from_pretrained( pretrained_model_name_or_path, return_unused_kwargs=True, trust_remote_code=trust_remote_code, code_revision=code_revision, _commit_hash=commit_hash, **hub_kwargs, **kwargs, ) # if torch_dtype=auto was passed here, ensure to pass it on if kwargs_orig.get("torch_dtype", None) == "auto": kwargs["torch_dtype"] = "auto" if kwargs_orig.get("quantization_config", None) is not None: kwargs["quantization_config"] = kwargs_orig["quantization_config"] has_remote_code = hasattr(config, "auto_map") and cls.__name__ in config.auto_map has_local_code = type(config) in cls._model_mapping.keys() trust_remote_code = resolve_trust_remote_code( trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code ) # Set the adapter kwargs kwargs["adapter_kwargs"] = adapter_kwargs if has_remote_code and trust_remote_code: class_ref = config.auto_map[cls.__name__] model_class = get_class_from_dynamic_module( class_ref, pretrained_model_name_or_path, code_revision=code_revision, **hub_kwargs, **kwargs ) _ = hub_kwargs.pop("code_revision", None) if os.path.isdir(pretrained_model_name_or_path): model_class.register_for_auto_class(cls.__name__) else: cls.register(config.__class__, model_class, exist_ok=True) return model_class.from_pretrained( pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs ) elif type(config) in cls._model_mapping.keys(): model_class = _get_model_class(config, cls._model_mapping) return model_class.from_pretrained( pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs ) raise ValueError( f"Unrecognized configuration class {config.__class__} for this kind of AutoModel: {cls.__name__}.\n" f"Model type should be one of {', '.join(c.__name__ for c in cls._model_mapping.keys())}." ) @classmethod def register(cls, config_class, model_class, exist_ok=False): """ Register a new model for this class. 
Args: config_class ([`PretrainedConfig`]): The configuration corresponding to the model to register. model_class ([`PreTrainedModel`]): The model to register. """ if hasattr(model_class, "config_class") and model_class.config_class != config_class: raise ValueError( "The model class you are passing has a `config_class` attribute that is not consistent with the " f"config class you passed (model has {model_class.config_class} and you passed {config_class}. Fix " "one of those so they match!" ) cls._model_mapping.register(config_class, model_class, exist_ok=exist_ok) class _BaseAutoBackboneClass(_BaseAutoModelClass): # Base class for auto backbone models. _model_mapping = None @classmethod def _load_timm_backbone_from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): requires_backends(cls, ["vision", "timm"]) from ...models.timm_backbone import TimmBackboneConfig config = kwargs.pop("config", TimmBackboneConfig()) if kwargs.get("out_features", None) is not None: raise ValueError("Cannot specify `out_features` for timm backbones") if kwargs.get("output_loading_info", False): raise ValueError("Cannot specify `output_loading_info=True` when loading from timm") num_channels = kwargs.pop("num_channels", config.num_channels) features_only = kwargs.pop("features_only", config.features_only) use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone) out_indices = kwargs.pop("out_indices", config.out_indices) config = TimmBackboneConfig( backbone=pretrained_model_name_or_path, num_channels=num_channels, features_only=features_only, use_pretrained_backbone=use_pretrained_backbone, out_indices=out_indices, ) return super().from_config(config, **kwargs) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): use_timm_backbone = kwargs.pop("use_timm_backbone", False) if use_timm_backbone: return cls._load_timm_backbone_from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) def insert_head_doc(docstring, head_doc=""): if len(head_doc) > 0: return docstring.replace( "one of the model classes of the library ", f"one of the model classes of the library (with a {head_doc} head) ", ) return docstring.replace( "one of the model classes of the library ", "one of the base model classes of the library " ) def auto_class_update(cls, checkpoint_for_example="google-bert/bert-base-cased", head_doc=""): # Create a new class with the right name from the base class model_mapping = cls._model_mapping name = cls.__name__ class_docstring = insert_head_doc(CLASS_DOCSTRING, head_doc=head_doc) cls.__doc__ = class_docstring.replace("BaseAutoModelClass", name) # Now we need to copy and re-register `from_config` and `from_pretrained` as class methods otherwise we can't # have a specific docstrings for them. 
from_config = copy_func(_BaseAutoModelClass.from_config) from_config_docstring = insert_head_doc(FROM_CONFIG_DOCSTRING, head_doc=head_doc) from_config_docstring = from_config_docstring.replace("BaseAutoModelClass", name) from_config_docstring = from_config_docstring.replace("checkpoint_placeholder", checkpoint_for_example) from_config.__doc__ = from_config_docstring from_config = replace_list_option_in_docstrings(model_mapping._model_mapping, use_model_types=False)(from_config) cls.from_config = classmethod(from_config) if name.startswith("TF"): from_pretrained_docstring = FROM_PRETRAINED_TF_DOCSTRING elif name.startswith("Flax"): from_pretrained_docstring = FROM_PRETRAINED_FLAX_DOCSTRING else: from_pretrained_docstring = FROM_PRETRAINED_TORCH_DOCSTRING from_pretrained = copy_func(_BaseAutoModelClass.from_pretrained) from_pretrained_docstring = insert_head_doc(from_pretrained_docstring, head_doc=head_doc) from_pretrained_docstring = from_pretrained_docstring.replace("BaseAutoModelClass", name) from_pretrained_docstring = from_pretrained_docstring.replace("checkpoint_placeholder", checkpoint_for_example) shortcut = checkpoint_for_example.split("/")[-1].split("-")[0] from_pretrained_docstring = from_pretrained_docstring.replace("shortcut_placeholder", shortcut) from_pretrained.__doc__ = from_pretrained_docstring from_pretrained = replace_list_option_in_docstrings(model_mapping._model_mapping)(from_pretrained) cls.from_pretrained = classmethod(from_pretrained) return cls def get_values(model_mapping): result = [] for model in model_mapping.values(): if isinstance(model, (list, tuple)): result += list(model) else: result.append(model) return result def getattribute_from_module(module, attr): if attr is None: return None if isinstance(attr, tuple): return tuple(getattribute_from_module(module, a) for a in attr) if hasattr(module, attr): return getattr(module, attr) # Some of the mappings have entries model_type -> object of another model type. In that case we try to grab the # object at the top level. transformers_module = importlib.import_module("transformers") if module != transformers_module: try: return getattribute_from_module(transformers_module, attr) except ValueError: raise ValueError(f"Could not find {attr} neither in {module} nor in {transformers_module}!") else: raise ValueError(f"Could not find {attr} in {transformers_module}!") class _LazyAutoMapping(OrderedDict): """ " A mapping config to object (model or tokenizer for instance) that will load keys and values when it is accessed. Args: - config_mapping: The map model type to config class - model_mapping: The map model type to model (or tokenizer) class """ def __init__(self, config_mapping, model_mapping): self._config_mapping = config_mapping self._reverse_config_mapping = {v: k for k, v in config_mapping.items()} self._model_mapping = model_mapping self._model_mapping._model_mapping = self self._extra_content = {} self._modules = {} def __len__(self): common_keys = set(self._config_mapping.keys()).intersection(self._model_mapping.keys()) return len(common_keys) + len(self._extra_content) def __getitem__(self, key): if key in self._extra_content: return self._extra_content[key] model_type = self._reverse_config_mapping[key.__name__] if model_type in self._model_mapping: model_name = self._model_mapping[model_type] return self._load_attr_from_module(model_type, model_name) # Maybe there was several model types associated with this config. 
model_types = [k for k, v in self._config_mapping.items() if v == key.__name__] for mtype in model_types: if mtype in self._model_mapping: model_name = self._model_mapping[mtype] return self._load_attr_from_module(mtype, model_name) raise KeyError(key) def _load_attr_from_module(self, model_type, attr): module_name = model_type_to_module_name(model_type) if module_name not in self._modules: self._modules[module_name] = importlib.import_module(f".{module_name}", "transformers.models") return getattribute_from_module(self._modules[module_name], attr) def keys(self): mapping_keys = [ self._load_attr_from_module(key, name) for key, name in self._config_mapping.items() if key in self._model_mapping.keys() ] return mapping_keys + list(self._extra_content.keys()) def get(self, key, default): try: return self.__getitem__(key) except KeyError: return default def __bool__(self): return bool(self.keys()) def values(self): mapping_values = [ self._load_attr_from_module(key, name) for key, name in self._model_mapping.items() if key in self._config_mapping.keys() ] return mapping_values + list(self._extra_content.values()) def items(self): mapping_items = [ ( self._load_attr_from_module(key, self._config_mapping[key]), self._load_attr_from_module(key, self._model_mapping[key]), ) for key in self._model_mapping.keys() if key in self._config_mapping.keys() ] return mapping_items + list(self._extra_content.items()) def __iter__(self): return iter(self.keys()) def __contains__(self, item): if item in self._extra_content: return True if not hasattr(item, "__name__") or item.__name__ not in self._reverse_config_mapping: return False model_type = self._reverse_config_mapping[item.__name__] return model_type in self._model_mapping def register(self, key, value, exist_ok=False): """ Register a new model in this mapping. """ if hasattr(key, "__name__") and key.__name__ in self._reverse_config_mapping: model_type = self._reverse_config_mapping[key.__name__] if model_type in self._model_mapping.keys() and not exist_ok: raise ValueError(f"'{key}' is already used by a Transformers model.") self._extra_content[key] = value
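# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the upstream module): it shows how the
# `register` hooks above are intended to be used from user code to plug a
# custom config/model pair into an auto class. `MyConfig` and `MyModel` are
# hypothetical names introduced only for this example.
if __name__ == "__main__":
    from torch import nn

    from transformers import AutoConfig, AutoModel, PretrainedConfig, PreTrainedModel

    class MyConfig(PretrainedConfig):
        model_type = "my-model"

    class MyModel(PreTrainedModel):
        config_class = MyConfig

        def __init__(self, config):
            super().__init__(config)
            self.proj = nn.Linear(4, 4)

        def forward(self, hidden_states):
            return self.proj(hidden_states)

    # Make the new pair discoverable by the auto classes built from this module.
    AutoConfig.register("my-model", MyConfig)
    AutoModel.register(MyConfig, MyModel)
    model = AutoModel.from_config(MyConfig())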
transformers/src/transformers/models/auto/auto_factory.py/0
{ "file_path": "transformers/src/transformers/models/auto/auto_factory.py", "repo_id": "transformers", "token_count": 17444 }
320
# coding=utf-8 # Copyright 2023 The Suno AI Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch BARK model.""" import math from typing import Dict, Optional, Tuple, Union import numpy as np import torch from torch import nn from torch.nn import functional as F from ...generation.logits_process import ( AlternatingCodebooksLogitsProcessor, BarkEosPrioritizerLogitsProcessor, SuppressTokensLogitsProcessor, ) from ...modeling_attn_mask_utils import _prepare_4d_attention_mask from ...modeling_outputs import CausalLMOutputWithPast, MaskedLMOutput from ...modeling_utils import PreTrainedModel, get_parameter_device from ...utils import ( add_start_docstrings, add_start_docstrings_to_model_forward, is_accelerate_available, is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10, logging, ) from ..auto import AutoModel from .configuration_bark import ( BarkCoarseConfig, BarkConfig, BarkFineConfig, BarkSemanticConfig, BarkSubModelConfig, ) from .generation_configuration_bark import ( BarkCoarseGenerationConfig, BarkFineGenerationConfig, BarkSemanticGenerationConfig, ) if is_flash_attn_2_available(): from flash_attn import flash_attn_func, flash_attn_varlen_func from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "suno/bark-small" _CONFIG_FOR_DOC = "BarkConfig" BARK_PRETRAINED_MODEL_ARCHIVE_LIST = [ "suno/bark-small", "suno/bark", # See all Bark models at https://huggingface.co/models?filter=bark ] # Copied from transformers.models.llama.modeling_llama._get_unpad_data def _get_unpad_data(attention_mask): seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() max_seqlen_in_batch = seqlens_in_batch.max().item() cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0)) return ( indices, cu_seqlens, max_seqlen_in_batch, ) class BarkSelfAttention(nn.Module): # adapted from GPTNeoSelfAttention and Bark code # BarkSelfAttention can have two attention type, i.e full attention or causal attention def __init__(self, config, is_causal=False): super().__init__() # regularization self.dropout = config.dropout self.attn_dropout = nn.Dropout(config.dropout) self.resid_dropout = nn.Dropout(config.dropout) self.embed_dim = config.hidden_size self.num_heads = config.num_heads self.head_dim = self.embed_dim // self.num_heads if config.hidden_size % config.num_heads != 0: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." 
) # key, query, value projections for all heads, but in a batch self.att_proj = nn.Linear(config.hidden_size, 3 * config.hidden_size, bias=config.bias) # output projection self.out_proj = nn.Linear(config.hidden_size, config.hidden_size, bias=config.bias) self.is_causal = is_causal if is_causal: block_size = config.block_size bias = torch.tril(torch.ones((block_size, block_size), dtype=bool)).view(1, 1, block_size, block_size) self.register_buffer("bias", bias) # Copied from transformers.models.gpt_neo.modeling_gpt_neo.GPTNeoSelfAttention._split_heads def _split_heads(self, tensor, num_heads, attn_head_size): """ Splits hidden_size dim into attn_head_size and num_heads """ new_shape = tensor.size()[:-1] + (num_heads, attn_head_size) tensor = tensor.view(new_shape) return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features) def _merge_heads(self, tensor, num_heads, attn_head_size): """ Merges attn_head_size dim and num_attn_heads dim into hidden_size """ # re-assemble all head outputs side by side # (batch, num_heads, seq_len, attn_head_size) -> (batch, seq_len, num_heads*attn_head_size) tensor = tensor.transpose(1, 2).contiguous() tensor = tensor.view(tensor.size()[:-2] + (num_heads * attn_head_size,)) return tensor def _attn(self, query, key, value, attention_mask=None, head_mask=None): # unlike GPTNeo's SelfAttention, divide by the square root of the dimension of the query and the key attn_weights = torch.matmul(query, key.transpose(-1, -2)) * (1.0 / math.sqrt(self.head_dim)) if self.is_causal: query_length, key_length = query.size(-2), key.size(-2) # fill the upper left part of the attention weights with inf attn_weights = attn_weights.masked_fill( self.bias[:, :, key_length - query_length : key_length, :key_length] == 0, torch.finfo(attn_weights.dtype).min, ) if attention_mask is not None: # Apply the attention mask attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1) attn_weights = attn_weights.to(value.dtype) attn_weights = self.attn_dropout(attn_weights) # Mask heads if we want to if head_mask is not None: attn_weights = attn_weights * head_mask # (batch, num_heads, seq_len, seq_len) x (batch, num_heads, seq_len, attn_head_size) # -> (batch, num_heads, seq_len, attn_head_size) attn_output = torch.matmul(attn_weights, value) return attn_output, attn_weights def forward( self, hidden_states, attention_mask=None, past_key_values=None, head_mask=None, use_cache=False, output_attentions=False, ): # calculate query, key, values for all heads in batch and move head forward to be the batch dim query, key, value = self.att_proj(hidden_states).split(self.embed_dim, dim=2) query = self._split_heads(query, self.num_heads, self.head_dim) key = self._split_heads(key, self.num_heads, self.head_dim) value = self._split_heads(value, self.num_heads, self.head_dim) if past_key_values is not None: past_key = past_key_values[0] past_value = past_key_values[1] key = torch.cat((past_key, key), dim=-2) value = torch.cat((past_value, value), dim=-2) if use_cache is True: present = (key, value) else: present = None attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask) attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim) attn_output = self.out_proj(attn_output) attn_output = self.resid_dropout(attn_output) outputs = (attn_output, present) if output_attentions: outputs += (attn_weights,) return outputs class BarkSelfFlashAttention2(BarkSelfAttention): """ Bark flash attention module. 
This module inherits from `BarkSelfAttention` as the weights of the module stays untouched. The only required change would be on the forward pass where it needs to correctly call the public API of flash attention and deal with padding tokens in case the input contains any of them. """ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() def _split_heads(self, tensor, num_heads, attn_head_size): """ Splits hidden_size dim into attn_head_size and num_heads """ new_shape = tensor.size()[:-1] + (num_heads, attn_head_size) tensor = tensor.view(new_shape) # Flash attention requires the input to have the shape # batch_size x seq_length x head_dim x hidden_dim - (batch, seq_length, head, head_features) return tensor def _merge_heads(self, tensor, num_heads, attn_head_size): """ Merges attn_head_size dim and num_attn_heads dim into hidden_size """ # re-assemble all head outputs side by side # (batch, seq_len, num_heads, attn_head_size) -> (batch, seq_len, num_heads*attn_head_size) tensor = tensor.view(tensor.size()[:-2] + (num_heads * attn_head_size,)) return tensor def forward( self, hidden_states, attention_mask=None, past_key_values=None, head_mask=None, use_cache=False, output_attentions=False, ): batch_size, query_len, _ = hidden_states.size() # calculate query, key, values for all heads in batch and move head forward to be the batch dim query, key, value = self.att_proj(hidden_states).split(self.embed_dim, dim=2) query = self._split_heads(query, self.num_heads, self.head_dim) key = self._split_heads(key, self.num_heads, self.head_dim) value = self._split_heads(value, self.num_heads, self.head_dim) if past_key_values is not None: # (batch, head, seq_length, head_features) -> (batch, seq_length, head, head_features) past_key = past_key_values[0].transpose(1, 2) past_value = past_key_values[1].transpose(1, 2) # and merge on seq_length key = torch.cat((past_key, key), dim=1) value = torch.cat((past_value, value), dim=1) if use_cache is True: # (batch, head, seq_length, head_features) present = (key.transpose(1, 2), value.transpose(1, 2)) else: present = None attn_output = self._flash_attention_forward(query, key, value, attention_mask, query_len, dropout=self.dropout) attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim) attn_output = self.out_proj(attn_output) attn_output = self.resid_dropout(attn_output) outputs = (attn_output, present) if output_attentions: attn_weights = None outputs += (attn_weights,) return outputs # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward def _flash_attention_forward( self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None ): """ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token first unpad the input, then computes the attention 
scores and pad the final attention scores. Args: query_states (`torch.Tensor`): Input query states to be passed to Flash Attention API key_states (`torch.Tensor`): Input key states to be passed to Flash Attention API value_states (`torch.Tensor`): Input value states to be passed to Flash Attention API attention_mask (`torch.Tensor`): The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the position of padding tokens and 1 for the position of non-padding tokens. dropout (`float`): Attention dropout softmax_scale (`float`, *optional*): The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim) """ if not self._flash_attn_uses_top_left_mask: causal = self.is_causal else: # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__. causal = self.is_causal and query_length != 1 # Contains at least one padding token in the sequence if attention_mask is not None: batch_size = query_states.shape[0] query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( query_states, key_states, value_states, attention_mask, query_length ) cu_seqlens_q, cu_seqlens_k = cu_seq_lens max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens attn_output_unpad = flash_attn_varlen_func( query_states, key_states, value_states, cu_seqlens_q=cu_seqlens_q, cu_seqlens_k=cu_seqlens_k, max_seqlen_q=max_seqlen_in_batch_q, max_seqlen_k=max_seqlen_in_batch_k, dropout_p=dropout, softmax_scale=softmax_scale, causal=causal, ) attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) else: attn_output = flash_attn_func( query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal ) return attn_output # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape key_layer = index_first_axis( key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k ) value_layer = index_first_axis( value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k ) if query_length == kv_seq_len: query_layer = index_first_axis( query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k ) cu_seqlens_q = cu_seqlens_k max_seqlen_in_batch_q = max_seqlen_in_batch_k indices_q = indices_k elif query_length == 1: max_seqlen_in_batch_q = 1 cu_seqlens_q = torch.arange( batch_size + 1, dtype=torch.int32, device=query_layer.device ) # There is a memcpy here, that is very bad. indices_q = cu_seqlens_q[:-1] query_layer = query_layer.squeeze(1) else: # The -q_len: slice assumes left padding. attention_mask = attention_mask[:, -query_length:] query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) return ( query_layer, key_layer, value_layer, indices_q, (cu_seqlens_q, cu_seqlens_k), (max_seqlen_in_batch_q, max_seqlen_in_batch_k), ) BARK_ATTENTION_CLASSES = { "eager": BarkSelfAttention, "flash_attention_2": BarkSelfFlashAttention2, } class BarkLayerNorm(nn.Module): """LayerNorm but with an optional bias. 
PyTorch doesn't support simply bias=False.""" def __init__(self, hidden_size, bias=True): super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) if bias else None def forward(self, input): return F.layer_norm(input, self.weight.shape, self.weight, self.bias, eps=1e-5) class BarkMLP(nn.Module): def __init__(self, config): super().__init__() self.in_proj = nn.Linear(config.hidden_size, 4 * config.hidden_size, bias=config.bias) self.out_proj = nn.Linear(4 * config.hidden_size, config.hidden_size, bias=config.bias) self.dropout = nn.Dropout(config.dropout) self.gelu = nn.GELU() def forward(self, hidden_states): hidden_states = self.in_proj(hidden_states) hidden_states = self.gelu(hidden_states) hidden_states = self.out_proj(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class BarkBlock(nn.Module): def __init__(self, config, is_causal=False): super().__init__() if is_causal: # if causal, uses handmade LayerNorm, so that the layerNorm bias is optional # this handmade layerNorm is used to stick with Bark choice of leaving optional bias in # AutoRegressive models (corresponding to the "Text" and the "Coarse" modules) self.layernorm_1 = BarkLayerNorm(config.hidden_size, bias=config.bias) self.layernorm_2 = BarkLayerNorm(config.hidden_size, bias=config.bias) else: self.layernorm_1 = nn.LayerNorm(config.hidden_size) self.layernorm_2 = nn.LayerNorm(config.hidden_size) self.attn = BARK_ATTENTION_CLASSES[config._attn_implementation](config, is_causal=is_causal) self.mlp = BarkMLP(config) def forward( self, hidden_states, past_key_values=None, attention_mask=None, head_mask=None, use_cache=False, output_attentions=False, ): intermediary_hidden_states = self.layernorm_1(hidden_states) attn_outputs = self.attn( intermediary_hidden_states, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, ) attn_output = attn_outputs[0] # output_attn: output, present_key_values, (attn_weights) outputs = attn_outputs[1:] intermediary_hidden_states = hidden_states + attn_output intermediary_hidden_states = intermediary_hidden_states + self.mlp( self.layernorm_2(intermediary_hidden_states) ) if use_cache: outputs = (intermediary_hidden_states,) + outputs else: outputs = (intermediary_hidden_states,) + outputs[1:] return outputs # hidden_states, ((present), attentions) class BarkPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = BarkConfig supports_gradient_checkpointing = False _supports_flash_attn_2 = True def _init_weights(self, module): """Initialize the weights.""" if isinstance(module, (nn.Linear,)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) def __init__(self, *inputs, **kwargs): super().__init__(*inputs, **kwargs) @property def device(self) -> torch.device: """ `torch.device`: The device on which the module is (assuming that all the module parameters are on the same device). """ # if has _hf_hook, has been offloaded so the device has to be found in the hook if not hasattr(self, "_hf_hook"): return get_parameter_device(self) for module in self.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return get_parameter_device(self) BARK_MODEL_START_DOCSTRING = """ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`{config}`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ BARK_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`BarkConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ BARK_FINE_INPUTS_DOCSTRING = r""" Args: codebook_idx (`int`): Index of the codebook that will be predicted. input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length, number_of_codebooks)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Initially, indices of the first two codebooks are obtained from the `coarse` sub-model. The rest is predicted recursively by attending the previously predicted channels. The model predicts on windows of length 1024. 
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): NOT IMPLEMENTED YET. input_embeds (`torch.FloatTensor` of shape `(batch_size, input_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. If `past_key_values` is used, optionally only the last `input_embeds` have to be input (see `past_key_values`). This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ BARK_CAUSAL_MODEL_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids` of shape `(batch_size, sequence_length)`. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. input_embeds (`torch.FloatTensor` of shape `(batch_size, input_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. Here, due to `Bark` particularities, if `past_key_values` is used, `input_embeds` will be ignored and you have to use `input_ids`. If `past_key_values` is not used and `use_cache` is set to `True`, `input_embeds` is used in priority instead of `input_ids`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ # GPT2-like autoregressive model class BarkCausalModel(BarkPreTrainedModel): config_class = BarkSubModelConfig def __init__(self, config): super().__init__(config) self.config = config # initialize as an autoregressive GPT-like model self.input_embeds_layer = nn.Embedding(config.input_vocab_size, config.hidden_size) self.position_embeds_layer = nn.Embedding(config.block_size, config.hidden_size) self.drop = nn.Dropout(config.dropout) self.layers = nn.ModuleList([BarkBlock(config, is_causal=True) for _ in range(config.num_layers)]) self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2" self.layernorm_final = BarkLayerNorm(config.hidden_size, bias=config.bias) self.lm_head = nn.Linear(config.hidden_size, config.output_vocab_size, bias=False) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.input_embeds_layer def set_input_embeddings(self, new_embeddings): self.input_embeds_layer = new_embeddings def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **kwargs): input_embeds = kwargs.get("input_embeds", None) attention_mask = kwargs.get("attention_mask", None) position_ids = kwargs.get("position_ids", None) if past_key_values is not None: # Omit tokens covered by past_key_values seq_len = input_ids.shape[1] past_length = past_key_values[0][0].shape[2] # Some generation methods already pass only the last input ID if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: # Default to old behavior: keep only final ID remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] # input_embeds have already been used and is not required anymore input_embeds = None else: if input_embeds is not None and kwargs.get("use_cache"): seq_len = input_embeds.shape[1] else: seq_len = input_ids.shape[1] # ensure that attention_mask and position_ids shapes are aligned with the weird Bark hack of reducing # sequence length on the first forward pass if attention_mask is not None: 
attention_mask = attention_mask[:, :seq_len] if position_ids is not None: position_ids = position_ids[:, :seq_len] if attention_mask is not None and position_ids is None: # create position_ids on the fly for batch generation position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -input_ids.shape[1] :] else: position_ids = None if input_embeds is not None and kwargs.get("use_cache"): return { "input_ids": None, "input_embeds": input_embeds, "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "position_ids": position_ids, "attention_mask": attention_mask, } return { "input_ids": input_ids, "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "position_ids": position_ids, "attention_mask": attention_mask, } @add_start_docstrings_to_model_forward(BARK_CAUSAL_MODEL_INPUTS_DOCSTRING) def forward( self, input_ids: Optional[torch.Tensor] = None, past_key_values: Optional[Tuple[torch.FloatTensor]] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, labels: Optional[torch.LongTensor] = None, input_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # Verify if input_embeds already exists # then compute embeddings. if input_ids is not None and input_embeds is not None: raise ValueError("You cannot specify both input_ids and input_embeds at the same time") elif input_embeds is not None and past_key_values is None: # we want to return the input_embeds in priority so that it is in line with a weird hack # of Bark which concatenate two bits of the input_embeds on the first forward pass of the semantic model pass elif input_ids is not None: input_embeds = self.input_embeds_layer(input_ids) # token embeddings of shape (b, t, n_embd) elif input_embeds is not None: pass else: raise ValueError("You have to specify either input_ids or input_embeds") input_shape = input_embeds.size()[:-1] batch_size = input_embeds.shape[0] seq_length = input_shape[-1] device = input_ids.device if input_ids is not None else input_embeds.device if past_key_values is None: past_length = 0 past_key_values = tuple([None] * len(self.layers)) else: past_length = past_key_values[0][0].size(-2) if position_ids is None: position_ids = torch.arange(past_length, seq_length + past_length, dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0) # shape (1, seq_length) position_embeds = self.position_embeds_layer(position_ids) # position embeddings of shape (1, t, n_embd) # Attention mask. 
if attention_mask is not None: if batch_size <= 0: raise ValueError("batch_size has to be defined and > 0") if self._use_flash_attention_2: attention_mask = attention_mask if 0 in attention_mask else None else: attention_mask = attention_mask.view(batch_size, -1) # [bsz, to_seq_length] -> [bsz, 1, 1, to_seq_length] # from_seq_length is 1 to easily broadcast attention_mask = _prepare_4d_attention_mask(attention_mask, input_embeds.dtype, tgt_len=1) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x num_heads x N x N # head_mask has shape num_layers x batch x num_heads x N x N head_mask = self.get_head_mask(head_mask, self.config.num_layers) hidden_states = self.drop(input_embeds + position_embeds) output_shape = input_shape + (hidden_states.size(-1),) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False present_key_values = () if use_cache else None all_self_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for i, (block, past_layer_key_values) in enumerate(zip(self.layers, past_key_values)): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: outputs = self._gradient_checkpointing_func( block.__call__, hidden_states, None, attention_mask, head_mask[i], use_cache, output_attentions, ) else: outputs = block( hidden_states, past_key_values=past_layer_key_values, attention_mask=attention_mask, head_mask=head_mask[i], use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = outputs[0] if use_cache: present_key_values = present_key_values + (outputs[1],) if output_attentions: all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) hidden_states = self.layernorm_final(hidden_states) hidden_states = hidden_states.view(output_shape) # Add last hidden state if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) logits = self.lm_head(hidden_states) loss = None if labels is not None: raise NotImplementedError( "Training is not implemented yet for Bark - ensure you do not pass `labels` to the model." ) if not return_dict: return tuple( v for v in [None, logits, present_key_values, all_hidden_states, all_self_attentions] if v is not None ) return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=present_key_values, hidden_states=all_hidden_states, attentions=all_self_attentions, ) @staticmethod def _reorder_cache( past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor ) -> Tuple[Tuple[torch.Tensor]]: """ This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct beam_idx at every generation step. """ # Necessary for beam_search return tuple( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past) for layer_past in past_key_values ) @add_start_docstrings( """Bark semantic (or text) model. It shares the same architecture as the coarse model. 
It is a GPT-2 like autoregressive model with a language modeling head on top.""", BARK_MODEL_START_DOCSTRING.format(config="BarkSemanticConfig"), ) class BarkSemanticModel(BarkCausalModel): base_model_prefix = "semantic" config_class = BarkSemanticConfig def generate( self, input_ids: torch.Tensor, semantic_generation_config: BarkSemanticGenerationConfig = None, history_prompt: Optional[Dict[str, torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, **kwargs, ) -> torch.LongTensor: """ Generates text semantic tokens from an input prompt and an additional optional `Bark` speaker prompt. Args: input_ids (`Optional[torch.Tensor]` of shape (batch_size, seq_len), *optional*): Input ids, i.e tokenized input sentences. Will be truncated up to semantic_generation_config.max_input_semantic_length tokens. Note that the output audios will be as long as the longest generation among the batch. semantic_generation_config (`BarkSemanticGenerationConfig`): Generation config indicating how to generate the semantic tokens. history_prompt (`Optional[Dict[str,torch.Tensor]]`, *optional*): Optional `Bark` speaker prompt. attention_mask (`Optional[torch.Tensor]`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) Returns: torch.LongTensor: Output semantic tokens. """ if semantic_generation_config is None: raise ValueError("`semantic_generation_config` has to be provided") batch_size = input_ids.shape[0] max_input_semantic_length = semantic_generation_config.max_input_semantic_length input_ids = input_ids + semantic_generation_config.text_encoding_offset if attention_mask is not None: input_ids = input_ids.masked_fill((1 - attention_mask).bool(), semantic_generation_config.text_pad_token) if history_prompt is not None: semantic_history = history_prompt["semantic_prompt"][-max_input_semantic_length:] semantic_history = nn.functional.pad( semantic_history, (0, max_input_semantic_length - len(semantic_history)), value=semantic_generation_config.semantic_pad_token, mode="constant", ) else: semantic_history = torch.tensor( [semantic_generation_config.semantic_pad_token] * max_input_semantic_length, dtype=torch.int ).to(self.device) semantic_history = torch.repeat_interleave(semantic_history[None], batch_size, dim=0) infer_array = torch.tensor( [[semantic_generation_config.semantic_infer_token]] * batch_size, dtype=torch.int ).to(self.device) input_embeds = torch.cat( [ self.input_embeds_layer(input_ids[:, :max_input_semantic_length]) + self.input_embeds_layer(semantic_history[:, : max_input_semantic_length + 1]), self.input_embeds_layer(infer_array), ], dim=1, ) tokens_to_suppress = list( range(semantic_generation_config.semantic_vocab_size, semantic_generation_config.semantic_pad_token) ) tokens_to_suppress.extend( list(range(semantic_generation_config.semantic_pad_token + 1, self.config.output_vocab_size)) ) suppress_tokens_logits_processor = SuppressTokensLogitsProcessor(tokens_to_suppress) min_eos_p = kwargs.get("min_eos_p", semantic_generation_config.min_eos_p) early_stopping_logits_processor = BarkEosPrioritizerLogitsProcessor( eos_token_id=semantic_generation_config.eos_token_id, min_eos_p=min_eos_p ) # pass input_ids in order to stay consistent with the transformers generate method even though it is not used # (except to get the input seq_len - that's why we keep the first 257 tokens) semantic_output = 
super().generate( torch.ones((batch_size, max_input_semantic_length + 1), dtype=torch.int).to(self.device), input_embeds=input_embeds, logits_processor=[suppress_tokens_logits_processor, early_stopping_logits_processor], generation_config=semantic_generation_config, **kwargs, ) # size: 10048 # take the generated semantic tokens semantic_output = semantic_output[:, max_input_semantic_length + 1 :] return semantic_output @add_start_docstrings( """Bark coarse acoustics model. It shares the same architecture as the semantic (or text) model. It is a GPT-2 like autoregressive model with a language modeling head on top.""", BARK_MODEL_START_DOCSTRING.format(config="BarkCoarseConfig"), ) class BarkCoarseModel(BarkCausalModel): base_model_prefix = "coarse_acoustics" config_class = BarkCoarseConfig def preprocess_histories( self, max_coarse_history: int, semantic_to_coarse_ratio: int, batch_size: int, semantic_generation_config: BarkSemanticGenerationConfig, codebook_size: int, history_prompt: Optional[Dict[str, torch.Tensor]] = None, ): """ Preprocess the optional `Bark` speaker prompts before `self.generate`. Args: max_coarse_history (`int`): Maximum size of coarse tokens used. semantic_to_coarse_ratio (`int`): Ratio of semantic to coarse frequency. batch_size (`int`): Batch size, i.e. the number of samples. semantic_generation_config (`BarkSemanticGenerationConfig`): Generation config indicating how to generate the semantic tokens. codebook_size (`int`): Codebook channel size, i.e. the size of the output vocabulary per codebook channel. history_prompt (`Optional[Dict[str,torch.Tensor]]`): Optional `Bark` speaker prompt. Returns: `tuple(torch.FloatTensor)`: - **x_semantic_history** (`torch.FloatTensor`) -- Processed semantic speaker prompt. - **x_coarse_history** (`torch.FloatTensor`) -- Processed coarse speaker prompt. """ if history_prompt is not None: x_semantic_history = torch.repeat_interleave(history_prompt["semantic_prompt"][None], batch_size, dim=0) # clone to avoid modifying history_prompt.coarse_prompt x_coarse_history = history_prompt["coarse_prompt"].clone() # offset x_coarse_history if codebook_size is not None: for n in range(1, x_coarse_history.shape[0]): # offset x_coarse_history[n, :] += codebook_size * n # flatten x_coarse_history x_coarse_history = torch.transpose(x_coarse_history, 0, 1).view(-1) x_coarse_history = x_coarse_history + semantic_generation_config.semantic_vocab_size x_coarse_history = torch.repeat_interleave(x_coarse_history[None], batch_size, dim=0) # e.g.: after SEMANTIC_VOCAB_SIZE (10000), 1024 tokens dedicated to first codebook, 1024 next tokens # dedicated to second codebook.
max_semantic_history = int(np.floor(max_coarse_history / semantic_to_coarse_ratio)) # trim histories correctly n_semantic_hist_provided = min( [ max_semantic_history, x_semantic_history.shape[1] - x_semantic_history.shape[1] % 2, int(np.floor(x_coarse_history.shape[1] / semantic_to_coarse_ratio)), ] ) n_coarse_hist_provided = int(round(n_semantic_hist_provided * semantic_to_coarse_ratio)) x_semantic_history = x_semantic_history[:, -n_semantic_hist_provided:].int() x_coarse_history = x_coarse_history[:, -n_coarse_hist_provided:].int() # bit of a hack for time alignment (sounds better) - from Bark original implementation x_coarse_history = x_coarse_history[:, :-2] else: # shape: (batch_size, 0) x_semantic_history = torch.tensor([[]] * batch_size, dtype=torch.int).to(self.device) x_coarse_history = torch.tensor([[]] * batch_size, dtype=torch.int).to(self.device) return x_semantic_history, x_coarse_history def generate( self, semantic_output: torch.Tensor, semantic_generation_config: BarkSemanticGenerationConfig = None, coarse_generation_config: BarkCoarseGenerationConfig = None, codebook_size: int = 1024, history_prompt: Optional[Dict[str, torch.Tensor]] = None, return_output_lengths: Optional[bool] = None, **kwargs, ) -> Union[torch.LongTensor, Tuple[torch.LongTensor, torch.LongTensor]]: """ Generates coarse acoustics tokens from input text semantic tokens and an additional optional `Bark` speaker prompt. Args: semantic_output (`torch.Tensor` of shape (batch_size, seq_len), *optional*): Input text semantic ids, i.e the output of `BarkSemanticModel.generate`. semantic_generation_config (`BarkSemanticGenerationConfig`): Generation config indicating how to generate the semantic tokens. coarse_generation_config (`BarkCoarseGenerationConfig`): Generation config indicating how to generate the coarse tokens. codebook_size (`int`, *optional*, defaults to 1024): Codebook channel size, i.e. the size of the output vocabulary per codebook channel. history_prompt (`Optional[Dict[str,torch.Tensor]]`, *optional*): Optional `Bark` speaker prompt. return_output_lengths (`bool`, *optional*): Whether or not to return the output lengths. Useful when batching. Returns: By default: torch.LongTensor: Output coarse acoustics tokens. If `return_output_lengths=True`: `Tuple(torch.Tensor, torch.Tensor): The output coarse acoustics tokens, and the length of each sample of the batch. 
""" if semantic_generation_config is None: raise ValueError("`semantic_generation_config` has to be provided") if coarse_generation_config is None: raise ValueError("`coarse_generation_config` has to be provided") max_coarse_input_length = coarse_generation_config.max_coarse_input_length max_coarse_history = coarse_generation_config.max_coarse_history sliding_window_len = coarse_generation_config.sliding_window_len # replace semantic_pad_token (eos_tok and pad_tok here) with coarse_semantic_pad_token i.e the pad_token # used in the next model semantic_output.masked_fill_( semantic_output == semantic_generation_config.semantic_pad_token, coarse_generation_config.coarse_semantic_pad_token, ) semantic_to_coarse_ratio = ( coarse_generation_config.coarse_rate_hz / semantic_generation_config.semantic_rate_hz * coarse_generation_config.n_coarse_codebooks ) max_semantic_history = int(np.floor(max_coarse_history / semantic_to_coarse_ratio)) output_lengths = (semantic_output != coarse_generation_config.coarse_semantic_pad_token).sum(1) output_lengths = torch.floor( output_lengths * semantic_to_coarse_ratio / coarse_generation_config.n_coarse_codebooks ) output_lengths = torch.round(output_lengths * coarse_generation_config.n_coarse_codebooks).int() max_generated_len = torch.max(output_lengths).item() batch_size = semantic_output.shape[0] x_semantic_history, x_coarse = self.preprocess_histories( history_prompt=history_prompt, max_coarse_history=max_coarse_history, semantic_to_coarse_ratio=semantic_to_coarse_ratio, batch_size=batch_size, semantic_generation_config=semantic_generation_config, codebook_size=codebook_size, ) base_semantic_idx = x_semantic_history.shape[1] semantic_output = torch.hstack([x_semantic_history, semantic_output]) n_window_steps = int(np.ceil(max_generated_len / sliding_window_len)) total_generated_len = 0 len_coarse_history = x_coarse.shape[1] for _ in range(n_window_steps): semantic_idx = base_semantic_idx + int(round(total_generated_len / semantic_to_coarse_ratio)) # pad from right side input_coarse = semantic_output[:, np.max([0, semantic_idx - max_semantic_history]) :] input_coarse = input_coarse[:, :max_coarse_input_length] input_coarse = F.pad( input_coarse, (0, max_coarse_input_length - input_coarse.shape[-1]), "constant", coarse_generation_config.coarse_semantic_pad_token, ) input_coarse = torch.hstack( [ input_coarse, torch.tensor([[coarse_generation_config.coarse_infer_token]] * batch_size).to(self.device), x_coarse[:, -max_coarse_history:], ] ) alternatingLogitsProcessor = AlternatingCodebooksLogitsProcessor( input_coarse.shape[1], semantic_generation_config.semantic_vocab_size, codebook_size, ) output_coarse = super().generate( input_coarse, logits_processor=[alternatingLogitsProcessor], max_new_tokens=min(sliding_window_len, max_generated_len - total_generated_len), generation_config=coarse_generation_config, **kwargs, ) input_coarse_len = input_coarse.shape[1] x_coarse = torch.hstack([x_coarse, output_coarse[:, input_coarse_len:]]) total_generated_len = x_coarse.shape[1] - len_coarse_history del output_coarse coarse_output = x_coarse[:, len_coarse_history:] if return_output_lengths: return coarse_output, output_lengths return coarse_output @add_start_docstrings( """Bark fine acoustics model. 
It is a non-causal GPT-like model with `config.n_codes_total` embedding layers and language modeling heads, one for each codebook.""", BARK_MODEL_START_DOCSTRING.format(config="BarkFineConfig"), ) class BarkFineModel(BarkPreTrainedModel): base_model_prefix = "fine_acoustics" config_class = BarkFineConfig main_input_name = "codebook_idx" def __init__(self, config): # non-causal gpt-like model with one embedding layer and one lm_head for each codebook of Encodec super().__init__(config) self.config = config # initialize a modified non causal GPT-like model # note that for there is one embedding layer and one lm_head for each codebook of Encodec self.input_embeds_layers = nn.ModuleList( [nn.Embedding(config.input_vocab_size, config.hidden_size) for _ in range(config.n_codes_total)] ) self.position_embeds_layer = nn.Embedding(config.block_size, config.hidden_size) self.drop = nn.Dropout(config.dropout) self.layers = nn.ModuleList([BarkBlock(config, is_causal=False) for _ in range(config.num_layers)]) self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2" self.layernorm_final = nn.LayerNorm(config.hidden_size) self.lm_heads = nn.ModuleList( [ nn.Linear(config.hidden_size, config.output_vocab_size, bias=False) for _ in range(config.n_codes_given, config.n_codes_total) ] ) self.gradient_checkpointing = False self.n_codes_total = config.n_codes_total # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): # one embedding layers for each codebook return self.input_embeds_layers def set_input_embeddings(self, new_embeddings): # one embedding layers for each codebook self.input_embeds_layers = new_embeddings def get_output_embeddings(self): # one lm_head for each codebook return self.lm_heads def set_output_embeddings(self, new_output_embeddings): # one lm_head for each codebook self.lm_heads = new_output_embeddings def _resize_token_embeddings(self, new_num_tokens, pad_to_multiple_of=None): old_embeddings_list = self.get_input_embeddings() new_embeddings_list = nn.ModuleList( [ self._get_resized_embeddings(old_embeddings, new_num_tokens, pad_to_multiple_of) for old_embeddings in old_embeddings_list ] ) self.set_input_embeddings(new_embeddings_list) new_num_tokens = new_embeddings_list[0].weight.shape[0] # if word embeddings are not tied, make sure that lm head is resized as well if self.get_output_embeddings() is not None and not self.config.tie_word_embeddings: old_lm_head_list = self.get_output_embeddings() new_lm_head_list = nn.ModuleList( [self._get_resized_lm_head(old_lm_head, new_num_tokens) for old_lm_head in old_lm_head_list] ) self.set_output_embeddings(new_lm_head_list) return self.get_input_embeddings() def resize_token_embeddings( self, new_num_tokens: Optional[int] = None, pad_to_multiple_of: Optional[int] = None ) -> nn.Embedding: """ Resizes input token embeddings matrix of the model if `new_num_tokens != config.vocab_size`. Takes care of tying weights embeddings afterwards if the model class has a `tie_weights()` method. Arguments: new_num_tokens (`int`, *optional*): The number of new tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just returns a pointer to the input tokens `torch.nn.Embedding` module of the model without doing anything. pad_to_multiple_of (`int`, *optional*): If set will pad the embedding matrix to a multiple of the provided value. 
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. For more details about this, or help on choosing the correct value for resizing, refer to this guide: https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#requirements-tc Return: `torch.nn.Embedding`: Pointer to the input tokens Embeddings Module of the model. """ model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of) if new_num_tokens is None and pad_to_multiple_of is None: return model_embeds # Update base model and current model config self.config.output_vocab_size = model_embeds[0].weight.shape[0] self.config.vocab_size = model_embeds[0].weight.shape[0] self.output_vocab_size = model_embeds[0].weight.shape[0] self.vocab_size = model_embeds[0].weight.shape[0] # Tie weights again if needed self.tie_weights() return model_embeds def tie_weights(self): """ Tie the weights between the input embeddings list and the output embeddings list. If the `torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning the weights instead. """ if getattr(self.config, "tie_word_embeddings", True): self._tied_weights_keys = [] output_embeddings = self.get_output_embeddings() input_embeddings = self.get_input_embeddings() for i in range(self.config.n_codes_total - self.config.n_codes_given): # self.input_embeds_layers[i + 1].weight = self.lm_heads[i].weight self._tie_or_clone_weights(output_embeddings[i], input_embeddings[i + 1]) self._tied_weights_keys.append(f"lm_heads.{i}.weight") for module in self.modules(): if hasattr(module, "_tie_weights"): module._tie_weights() @add_start_docstrings_to_model_forward(BARK_FINE_INPUTS_DOCSTRING) def forward( self, codebook_idx: int, # an additionnal idx corresponding to the id of the codebook that will be predicted input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, labels: Optional[torch.LongTensor] = None, input_embeds: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if codebook_idx == 0: raise ValueError("Cannot predict 0th codebook - 0th codebook should be predicted by the coarse model") if input_ids is not None and input_embeds is not None: raise ValueError("You cannot specify both input_ids and input_embeds at the same time") if input_ids is None and input_embeds is None: raise ValueError("You have to specify either input_ids or input_embeds") if input_ids is not None: # the input_embeddings are the sum of the j previous codebooks embeddings before # the current codebook_idx codebook # forward the GPT model itself input_embeds = [ input_embeds_layer(input_ids[:, :, i]).unsqueeze(-1) for i, input_embeds_layer in enumerate(self.input_embeds_layers) ] # token embeddings of shape (b, t, n_embd) input_embeds = torch.cat(input_embeds, dim=-1) input_embeds = input_embeds[:, :, :, : 
codebook_idx + 1].sum(dim=-1) input_shape = input_embeds.size()[:-1] batch_size = input_embeds.shape[0] seq_length = input_shape[1] device = input_ids.device if input_ids is not None else input_embeds.device if position_ids is None: position_ids = torch.arange(0, seq_length, dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0) # shape (1, seq_length) position_embeds = self.position_embeds_layer(position_ids) # position embeddings of shape (1, t, n_embd) # Attention mask. if attention_mask is not None: if batch_size <= 0: raise ValueError("batch_size has to be defined and > 0") if self._use_flash_attention_2: attention_mask = attention_mask if 0 in attention_mask else None else: # [bsz, to_seq_length] -> [bsz, 1, 1, to_seq_length] # from_seq_length is 1 to easily broadcast attention_mask = _prepare_4d_attention_mask(attention_mask, input_embeds.dtype, tgt_len=1) head_mask = self.get_head_mask(head_mask, self.config.num_layers) hidden_states = self.drop(input_embeds + position_embeds) output_shape = input_shape + (hidden_states.size(-1),) all_self_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for i, block in enumerate(self.layers): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = block( hidden_states, attention_mask=attention_mask, head_mask=head_mask[i], output_attentions=output_attentions, ) hidden_states = outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (outputs[1],) hidden_states = self.layernorm_final(hidden_states) hidden_states = hidden_states.view(output_shape) # Add last hidden state if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) logits = self.lm_heads[codebook_idx - self.config.n_codes_given](hidden_states) loss = None if labels is not None: raise NotImplementedError("Training is not implemented yet") if not return_dict: return tuple(v for v in [None, logits, all_hidden_states, all_self_attentions] if v is not None) return MaskedLMOutput( loss=loss, logits=logits, hidden_states=all_hidden_states, attentions=all_self_attentions, ) def generate( self, coarse_output: torch.Tensor, semantic_generation_config: BarkSemanticGenerationConfig = None, coarse_generation_config: BarkCoarseGenerationConfig = None, fine_generation_config: BarkFineGenerationConfig = None, codebook_size: int = 1024, history_prompt: Optional[Dict[str, torch.Tensor]] = None, **kwargs, ) -> torch.LongTensor: """ Generates fine acoustics tokens from input coarse acoustics tokens and an additional optional `Bark` speaker prompt. Args: coarse_output (`torch.Tensor` of shape (batch_size, seq_len)): Input coarse acoustics ids, i.e the output of `BarkCoarseModel.generate`. semantic_generation_config (`BarkSemanticGenerationConfig`): Generation config indicating how to generate the semantic tokens. coarse_generation_config (`BarkCoarseGenerationConfig`): Generation config indicating how to generate the coarse tokens. fine_generation_config (`BarkFineGenerationConfig`): Generation config indicating how to generate the fine tokens. codebook_size (`int`, *optional*, defaults to 1024): Codebook channel size, i.e. the size of the output vocabulary per codebook channel. history_prompt (`Optional[Dict[str,torch.Tensor]]`, *optional*): Optional `Bark` speaker prompt. Returns: torch.LongTensor: Output fine acoustics tokens. 
""" if semantic_generation_config is None: raise ValueError("`semantic_generation_config` has to be provided") if coarse_generation_config is None: raise ValueError("`coarse_generation_config` has to be provided") if fine_generation_config is None: raise ValueError("`fine_generation_config` has to be provided") # since we don't really use GenerationConfig through the fine model (autoencoder) # and since only temperature is used from the classic GenerationConfig parameters # manually impose the kwargs priority over the generation config temperature = kwargs.get("temperature", fine_generation_config.temperature) max_fine_history_length = fine_generation_config.max_fine_history_length max_fine_input_length = fine_generation_config.max_fine_input_length # shape: (batch, n_coarse_codebooks * seq_len) # new_shape: (batch, seq_len, n_coarse_codebooks) coarse_output = coarse_output.view(coarse_output.shape[0], -1, coarse_generation_config.n_coarse_codebooks) # brings ids into the range [0, codebook_size -1] coarse_output = torch.remainder(coarse_output - semantic_generation_config.semantic_vocab_size, codebook_size) batch_size = coarse_output.shape[0] if history_prompt is not None: x_fine_history = torch.repeat_interleave(history_prompt["fine_prompt"].T[None], batch_size, dim=0) # transpose to get to shape (seq_len, n_fine_codebooks) else: x_fine_history = None n_coarse = coarse_generation_config.n_coarse_codebooks # pad the last 6th codebooks fine_input = F.pad( coarse_output, (0, fine_generation_config.n_fine_codebooks - n_coarse), "constant", codebook_size, ) # prepend history if available (max max_fine_history_length) if x_fine_history is not None: fine_input = torch.cat([x_fine_history[:, -max_fine_history_length:, :], fine_input], dim=1) # len of the fine_history that has been added to fine_input n_history = x_fine_history[:, -max_fine_history_length:, :].shape[1] else: n_history = 0 n_remove_from_end = 0 # need to pad if too short (since non-causal model) if fine_input.shape[1] < max_fine_input_length: n_remove_from_end = max_fine_input_length - fine_input.shape[1] fine_input = F.pad(fine_input, (0, 0, 0, n_remove_from_end), mode="constant", value=codebook_size) # we can be lazy about fractional loop and just keep overwriting codebooks. # seems that coarse_output.shape[1] - (max_fine_input_length - n_history) is equal to minus n_remove_from_end # So if we needed to pad because too short, n_loops is always 1 (because n_remove_from_end > 0) # If not, we loop over at least twice. 
n_loops = (coarse_output.shape[1] - (max_fine_input_length - n_history)) / max_fine_history_length n_loops = int(np.ceil(n_loops)) n_loops = max(0, n_loops) + 1 for n_outer in range(n_loops): start_idx = min([n_outer * max_fine_history_length, fine_input.shape[1] - max_fine_input_length]) start_fill_idx = min( [n_history + n_outer * max_fine_history_length, fine_input.shape[1] - max_fine_history_length] ) rel_start_fill_idx = start_fill_idx - start_idx input_buffer = fine_input[:, start_idx : start_idx + max_fine_input_length, :] for n_inner in range(n_coarse, fine_generation_config.n_fine_codebooks): logits = self.forward(n_inner, input_buffer).logits if temperature is None or temperature == 1.0: relevant_logits = logits[:, rel_start_fill_idx:, :codebook_size] codebook_preds = torch.argmax(relevant_logits, -1) else: relevant_logits = logits[:, :, :codebook_size] / temperature # apply softmax probs = F.softmax(relevant_logits, dim=-1)[:, rel_start_fill_idx:max_fine_input_length] # reshape to 2D: (batch_size, seq_len, codebook_size) -> (batch_size*seq_len, codebook_size) probs = probs.reshape((-1, codebook_size)) # multinomial then reshape : (batch_size*seq_len)-> (batch_size,seq_len) codebook_preds = torch.multinomial(probs, num_samples=1).view(batch_size, -1) codebook_preds = codebook_preds.to(torch.int32) input_buffer[:, rel_start_fill_idx:, n_inner] = codebook_preds del logits, codebook_preds # transfer into fine_input for n_inner in range(n_coarse, fine_generation_config.n_fine_codebooks): fine_input[ :, start_fill_idx : start_fill_idx + (max_fine_input_length - rel_start_fill_idx), n_inner ] = input_buffer[:, rel_start_fill_idx:, n_inner] del input_buffer fine_input = fine_input.transpose(1, 2)[:, :, n_history:] if n_remove_from_end > 0: fine_input = fine_input[:, :, :-n_remove_from_end] if fine_input.shape[-1] != coarse_output.shape[-2]: raise ValueError("input and output should have the same seq_len") return fine_input @add_start_docstrings( """ The full Bark model, a text-to-speech model composed of 4 sub-models: - [`BarkSemanticModel`] (also referred to as the 'text' model): a causal auto-regressive transformer model that takes as input tokenized text, and predicts semantic text tokens that capture the meaning of the text. - [`BarkCoarseModel`] (also referred to as the 'coarse acoustics' model), also a causal autoregressive transformer, that takes as input the results of the previous model. It aims at regressing the first two audio codebooks necessary for `encodec`. - [`BarkFineModel`] (the 'fine acoustics' model), this time a non-causal autoencoder transformer, which iteratively predicts the last codebooks based on the sum of the previous codebooks embeddings. - once all the codebook channels of the [`EncodecModel`] have been predicted, Bark uses it to decode the output audio array. It should be noted that each of the first three modules can support conditional speaker embeddings to condition the output sound according to a specific predefined voice.
""", BARK_START_DOCSTRING, ) class BarkModel(BarkPreTrainedModel): config_class = BarkConfig def __init__(self, config): super().__init__(config) self.semantic = BarkSemanticModel(config.semantic_config) self.coarse_acoustics = BarkCoarseModel(config.coarse_acoustics_config) self.fine_acoustics = BarkFineModel(config.fine_acoustics_config) self.codec_model = AutoModel.from_config(config.codec_config) self.config = config @property def device(self) -> torch.device: """ `torch.device`: The device on which the module is (assuming that all the module parameters are on the same device). """ # for bark_model, device must be verified on its sub-models # if has _hf_hook, has been offloaded so the device has to be found in the hook if not hasattr(self.semantic, "_hf_hook"): return get_parameter_device(self) for module in self.semantic.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) def enable_cpu_offload(self, gpu_id: Optional[int] = 0): r""" Offloads all sub-models to CPU using accelerate, reducing memory usage with a low impact on performance. This method moves one whole sub-model at a time to the GPU when it is used, and the sub-model remains in GPU until the next sub-model runs. Args: gpu_id (`int`, *optional*, defaults to 0): GPU id on which the sub-models will be loaded and offloaded. """ if is_accelerate_available(): from accelerate import cpu_offload_with_hook else: raise ImportError("`enable_model_cpu_offload` requires `accelerate`.") device = torch.device(f"cuda:{gpu_id}") if self.device.type != "cpu": self.to("cpu") torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) # this layer is used outside the first foward pass of semantic so need to be loaded before semantic self.semantic.input_embeds_layer, _ = cpu_offload_with_hook(self.semantic.input_embeds_layer, device) hook = None for cpu_offloaded_model in [ self.semantic, self.coarse_acoustics, self.fine_acoustics, ]: _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook) self.fine_acoustics_hook = hook _, hook = cpu_offload_with_hook(self.codec_model, device, prev_module_hook=hook) # We'll offload the last model manually. self.codec_model_hook = hook def codec_decode(self, fine_output, output_lengths=None): """Turn quantized audio codes into audio array using encodec.""" fine_output = fine_output.transpose(0, 1) emb = self.codec_model.quantizer.decode(fine_output) if output_lengths is not None: # encodec uses LSTMs which behaves differently with appended padding # decoding with encodec takes around 0.1% of the total generation time # to keep generation quality, we break batching out = [sample[:, :l].unsqueeze(0) for (sample, l) in zip(emb, output_lengths)] audio_arr = [self.codec_model.decoder(sample).squeeze() for sample in out] else: out = self.codec_model.decoder(emb) audio_arr = out.squeeze(1) # squeeze the codebook dimension return audio_arr @torch.no_grad() def generate( self, input_ids: Optional[torch.Tensor] = None, history_prompt: Optional[Dict[str, torch.Tensor]] = None, return_output_lengths: Optional[bool] = None, **kwargs, ) -> torch.LongTensor: """ Generates audio from an input prompt and an additional optional `Bark` speaker prompt. Args: input_ids (`Optional[torch.Tensor]` of shape (batch_size, seq_len), *optional*): Input ids. Will be truncated up to 256 tokens. 
Note that the output audios will be as long as the longest generation among the batch. history_prompt (`Optional[Dict[str,torch.Tensor]]`, *optional*): Optional `Bark` speaker prompt. Note that for now, this model takes only one speaker prompt per batch. kwargs (*optional*): Remaining dictionary of keyword arguments. Keyword arguments are of two types: - Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model. - With a *semantic_*, *coarse_*, *fine_* prefix, they will be input for the `generate` method of the semantic, coarse and fine respectively. It has the priority over the keywords without a prefix. This means you can, for example, specify a generation strategy for all sub-models except one. return_output_lengths (`bool`, *optional*): Whether or not to return the waveform lengths. Useful when batching. Returns: By default: - **audio_waveform** (`torch.Tensor` of shape (batch_size, seq_len)): Generated audio waveform. When `return_output_lengths=True`: Returns a tuple made of: - **audio_waveform** (`torch.Tensor` of shape (batch_size, seq_len)): Generated audio waveform. - **output_lengths** (`torch.Tensor` of shape (batch_size)): The length of each waveform in the batch Example: ```python >>> from transformers import AutoProcessor, BarkModel >>> processor = AutoProcessor.from_pretrained("suno/bark-small") >>> model = BarkModel.from_pretrained("suno/bark-small") >>> # To add a voice preset, you can pass `voice_preset` to `BarkProcessor.__call__(...)` >>> voice_preset = "v2/en_speaker_6" >>> inputs = processor("Hello, my dog is cute, I need him in my life", voice_preset=voice_preset) >>> audio_array = model.generate(**inputs, semantic_max_new_tokens=100) >>> audio_array = audio_array.cpu().numpy().squeeze() ``` """ # TODO (joao):workaround until nested generation config is compatible with PreTrained Model # todo: dict semantic_generation_config = BarkSemanticGenerationConfig(**self.generation_config.semantic_config) coarse_generation_config = BarkCoarseGenerationConfig(**self.generation_config.coarse_acoustics_config) fine_generation_config = BarkFineGenerationConfig(**self.generation_config.fine_acoustics_config) kwargs_semantic = { # if "attention_mask" is set, it should not be passed to CoarseModel and FineModel "attention_mask": kwargs.pop("attention_mask", None), "min_eos_p": kwargs.pop("min_eos_p", None), } kwargs_coarse = {} kwargs_fine = {} for key, value in kwargs.items(): if key.startswith("semantic_"): key = key[len("semantic_") :] kwargs_semantic[key] = value elif key.startswith("coarse_"): key = key[len("coarse_") :] kwargs_coarse[key] = value elif key.startswith("fine_"): key = key[len("fine_") :] kwargs_fine[key] = value else: # If the key is already in a specific config, then it's been set with a # submodules specific value and we don't override if key not in kwargs_semantic: kwargs_semantic[key] = value if key not in kwargs_coarse: kwargs_coarse[key] = value if key not in kwargs_fine: kwargs_fine[key] = value # 1. Generate from the semantic model semantic_output = self.semantic.generate( input_ids, history_prompt=history_prompt, semantic_generation_config=semantic_generation_config, **kwargs_semantic, ) # 2. 
Generate from the coarse model coarse_output = self.coarse_acoustics.generate( semantic_output, history_prompt=history_prompt, semantic_generation_config=semantic_generation_config, coarse_generation_config=coarse_generation_config, codebook_size=self.generation_config.codebook_size, return_output_lengths=return_output_lengths, **kwargs_coarse, ) output_lengths = None if return_output_lengths: coarse_output, output_lengths = coarse_output # (batch_size, seq_len*coarse_codebooks) -> (batch_size, seq_len) output_lengths = output_lengths // coarse_generation_config.n_coarse_codebooks # 3. "generate" from the fine model output = self.fine_acoustics.generate( coarse_output, history_prompt=history_prompt, semantic_generation_config=semantic_generation_config, coarse_generation_config=coarse_generation_config, fine_generation_config=fine_generation_config, codebook_size=self.generation_config.codebook_size, **kwargs_fine, ) if getattr(self, "fine_acoustics_hook", None) is not None: # Manually offload fine_acoustics to CPU # and load codec_model to GPU # since bark doesn't use codec_model forward pass self.fine_acoustics_hook.offload() self.codec_model = self.codec_model.to(self.device) # 4. Decode the output and generate audio array audio = self.codec_decode(output, output_lengths) if getattr(self, "codec_model_hook", None) is not None: # Offload codec_model to CPU self.codec_model_hook.offload() if return_output_lengths: output_lengths = [len(sample) for sample in audio] audio = nn.utils.rnn.pad_sequence(audio, batch_first=True, padding_value=0) return audio, output_lengths return audio @classmethod def _check_and_enable_flash_attn_2( cls, config, torch_dtype: Optional[torch.dtype] = None, device_map: Optional[Union[str, Dict[str, int]]] = None, hard_check_only: bool = False, check_device_map: bool = False, ): """ `_check_and_enable_flash_attn_2` originally don't expand flash attention enabling to the model sub-configurations. We override the original method to make sure that Bark sub-models are using Flash Attention if necessary. If you don't know about Flash Attention, check out the official repository of flash attention: https://github.com/Dao-AILab/flash-attention For using Flash Attention 1.0 you can do it directly via the `BetterTransformer` API, have a look at this specific section of the documentation to learn more about it: https://huggingface.co/docs/transformers/main/en/perf_infer_gpu_one#decoder-models The method checks if the current setup is compatible with Flash Attention as it requires the model to be in half precision and not ran on CPU. If all checks pass and `hard_check_only` is False, the method will set the config attribute `_attn_implementation` to "flash_attention_2" so that the model can initialize the correct attention module """ config = super()._check_and_enable_flash_attn_2( config, torch_dtype, device_map, hard_check_only=hard_check_only, check_device_map=check_device_map ) config.semantic_config._attn_implementation = config._attn_implementation config.coarse_acoustics_config._attn_implementation = config._attn_implementation config.fine_acoustics_config._attn_implementation = config._attn_implementation return config
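# A minimal usage sketch of how the sub-models above are chained by `BarkModel.generate`.
# It reuses the checkpoint and voice preset cited in the docstring example of `generate`; the
# prefixed generation values (`semantic_max_new_tokens`, `coarse_temperature`) are illustrative
# assumptions, not recommended settings. Prefixed kwargs are routed to the matching sub-model's
# `generate`, and `return_output_lengths=True` additionally returns the un-padded waveform lengths.
if __name__ == "__main__":
    from transformers import AutoProcessor, BarkModel

    processor = AutoProcessor.from_pretrained("suno/bark-small")
    model = BarkModel.from_pretrained("suno/bark-small")

    if torch.cuda.is_available():
        # Moves each sub-model to the GPU only while it is used (requires `accelerate`).
        model.enable_cpu_offload()

    inputs = processor("Hello, my dog is cute, I need him in my life", voice_preset="v2/en_speaker_6")

    audio, lengths = model.generate(
        **inputs,
        semantic_max_new_tokens=100,  # forwarded only to the semantic sub-model
        coarse_temperature=0.8,  # forwarded only to the coarse sub-model (assumed value)
        return_output_lengths=True,  # also return the length of each generated waveform
    )
    print(audio.shape, lengths)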
transformers/src/transformers/models/bark/modeling_bark.py/0
{ "file_path": "transformers/src/transformers/models/bark/modeling_bark.py", "repo_id": "transformers", "token_count": 37251 }
321
# coding=utf-8 # Copyright Microsoft Research and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ BEiT model configuration""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices logger = logging.get_logger(__name__) BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = { "microsoft/beit-base-patch16-224-pt22k": ( "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json" ), # See all BEiT models at https://huggingface.co/models?filter=beit } class BeitConfig(BackboneConfigMixin, PretrainedConfig): r""" This is the configuration class to store the configuration of a [`BeitModel`]. It is used to instantiate an BEiT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the BEiT [microsoft/beit-base-patch16-224-pt22k](https://huggingface.co/microsoft/beit-base-patch16-224-pt22k) architecture. Args: vocab_size (`int`, *optional*, defaults to 8192): Vocabulary size of the BEiT model. Defines the number of different image tokens that can be used during pre-training. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 16): The size (resolution) of each patch. num_channels (`int`, *optional*, defaults to 3): The number of input channels. use_mask_token (`bool`, *optional*, defaults to `False`): Whether to use a mask token for masked image modeling. 
use_absolute_position_embeddings (`bool`, *optional*, defaults to `False`): Whether to use BERT-style absolute position embeddings. use_relative_position_bias (`bool`, *optional*, defaults to `False`): Whether to use T5-style relative position embeddings in the self-attention layers. use_shared_relative_position_bias (`bool`, *optional*, defaults to `False`): Whether to use the same relative position embeddings across all self-attention layers of the Transformer. layer_scale_init_value (`float`, *optional*, defaults to 0.1): Scale to use in the self-attention layers. 0.1 for base, 1e-5 for large. Set 0 to disable layer scale. drop_path_rate (`float`, *optional*, defaults to 0.1): Stochastic depth rate per sample (when applied in the main path of residual layers). use_mean_pooling (`bool`, *optional*, defaults to `True`): Whether to mean pool the final hidden states of the patches instead of using the final hidden state of the CLS token, before applying the classification head. pool_scales (`Tuple[int]`, *optional*, defaults to `[1, 2, 3, 6]`): Pooling scales used in Pooling Pyramid Module applied on the last feature map. use_auxiliary_head (`bool`, *optional*, defaults to `True`): Whether to use an auxiliary head during training. auxiliary_loss_weight (`float`, *optional*, defaults to 0.4): Weight of the cross-entropy loss of the auxiliary head. auxiliary_channels (`int`, *optional*, defaults to 256): Number of channels to use in the auxiliary head. auxiliary_num_convs (`int`, *optional*, defaults to 1): Number of convolutional layers to use in the auxiliary head. auxiliary_concat_input (`bool`, *optional*, defaults to `False`): Whether to concatenate the output of the auxiliary head with the input before the classification layer. semantic_loss_ignore_index (`int`, *optional*, defaults to 255): The index that is ignored by the loss function of the semantic segmentation model. out_features (`List[str]`, *optional*): If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc. (depending on how many stages the model has). If unset and `out_indices` is set, will default to the corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the same order as defined in the `stage_names` attribute. out_indices (`List[int]`, *optional*): If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how many stages the model has). If unset and `out_features` is set, will default to the corresponding stages. If unset and `out_features` is unset, will default to the last stage. Must be in the same order as defined in the `stage_names` attribute. add_fpn (`bool`, *optional*, defaults to `False`): Whether to add a FPN as part of the backbone. Only relevant for [`BeitBackbone`]. reshape_hidden_states (`bool`, *optional*, defaults to `True`): Whether to reshape the feature maps to 4D tensors of shape `(batch_size, hidden_size, height, width)` in case the model is used as backbone. If `False`, the feature maps will be 3D tensors of shape `(batch_size, seq_len, hidden_size)`. Only relevant for [`BeitBackbone`]. 
Example: ```python >>> from transformers import BeitConfig, BeitModel >>> # Initializing a BEiT beit-base-patch16-224-pt22k style configuration >>> configuration = BeitConfig() >>> # Initializing a model (with random weights) from the beit-base-patch16-224-pt22k style configuration >>> model = BeitModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "beit" def __init__( self, vocab_size=8192, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, out_features=None, out_indices=None, add_fpn=False, reshape_hidden_states=True, **kwargs, ): super().__init__(**kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.use_mask_token = use_mask_token self.use_absolute_position_embeddings = use_absolute_position_embeddings self.use_relative_position_bias = use_relative_position_bias self.use_shared_relative_position_bias = use_shared_relative_position_bias self.layer_scale_init_value = layer_scale_init_value self.drop_path_rate = drop_path_rate self.use_mean_pooling = use_mean_pooling # decode head attributes (semantic segmentation) self.pool_scales = pool_scales # auxiliary head attributes (semantic segmentation) self.use_auxiliary_head = use_auxiliary_head self.auxiliary_loss_weight = auxiliary_loss_weight self.auxiliary_channels = auxiliary_channels self.auxiliary_num_convs = auxiliary_num_convs self.auxiliary_concat_input = auxiliary_concat_input self.semantic_loss_ignore_index = semantic_loss_ignore_index # handle backwards compatibility if "segmentation_indices" in kwargs: logger.warning( "The `segmentation_indices` argument is deprecated and will be removed in a future version, use `out_indices` instead.", FutureWarning, ) out_indices = kwargs.pop("segmentation_indices") # backbone attributes self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, self.num_hidden_layers + 1)] self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=out_features, out_indices=out_indices, stage_names=self.stage_names ) self.add_fpn = add_fpn self.reshape_hidden_states = reshape_hidden_states # Copied from transformers.models.vit.configuration_vit.ViTOnnxConfig class BeitOnnxConfig(OnnxConfig): torch_onnx_minimum_version = version.parse("1.11") @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def atol_for_validation(self) -> float: return 
1e-4
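# --- Editor's usage sketch (appended; not part of the original configuration_beit.py) ---
# A minimal, hedged example of the backbone-related arguments (`out_features` /
# `out_indices`) documented above. It only builds configuration objects locally,
# nothing is downloaded; the exact container type of the returned values may
# differ slightly between transformers versions. Intended to be run as a
# standalone script.
if __name__ == "__main__":
    from transformers import BeitConfig

    # With neither argument given, the backbone defaults to the last stage.
    default_config = BeitConfig()
    print(default_config.out_features, default_config.out_indices)

    # Named stages are aligned to their indices in `stage_names` automatically.
    backbone_config = BeitConfig(out_features=["stage4", "stage8", "stage12"])
    print(backbone_config.out_features, backbone_config.out_indices)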
transformers/src/transformers/models/beit/configuration_beit.py/0
{ "file_path": "transformers/src/transformers/models/beit/configuration_beit.py", "repo_id": "transformers", "token_count": 4551 }
322
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Fast Tokenization classes for Bert.""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bert import BertTokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "google-bert/bert-base-uncased": "https://huggingface.co/google-bert/bert-base-uncased/resolve/main/vocab.txt", "google-bert/bert-large-uncased": "https://huggingface.co/google-bert/bert-large-uncased/resolve/main/vocab.txt", "google-bert/bert-base-cased": "https://huggingface.co/google-bert/bert-base-cased/resolve/main/vocab.txt", "google-bert/bert-large-cased": "https://huggingface.co/google-bert/bert-large-cased/resolve/main/vocab.txt", "google-bert/bert-base-multilingual-uncased": ( "https://huggingface.co/google-bert/bert-base-multilingual-uncased/resolve/main/vocab.txt" ), "google-bert/bert-base-multilingual-cased": "https://huggingface.co/google-bert/bert-base-multilingual-cased/resolve/main/vocab.txt", "google-bert/bert-base-chinese": "https://huggingface.co/google-bert/bert-base-chinese/resolve/main/vocab.txt", "google-bert/bert-base-german-cased": "https://huggingface.co/google-bert/bert-base-german-cased/resolve/main/vocab.txt", "google-bert/bert-large-uncased-whole-word-masking": ( "https://huggingface.co/google-bert/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt" ), "google-bert/bert-large-cased-whole-word-masking": ( "https://huggingface.co/google-bert/bert-large-cased-whole-word-masking/resolve/main/vocab.txt" ), "google-bert/bert-large-uncased-whole-word-masking-finetuned-squad": ( "https://huggingface.co/google-bert/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt" ), "google-bert/bert-large-cased-whole-word-masking-finetuned-squad": ( "https://huggingface.co/google-bert/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt" ), "google-bert/bert-base-cased-finetuned-mrpc": ( "https://huggingface.co/google-bert/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt" ), "google-bert/bert-base-german-dbmdz-cased": "https://huggingface.co/google-bert/bert-base-german-dbmdz-cased/resolve/main/vocab.txt", "google-bert/bert-base-german-dbmdz-uncased": ( "https://huggingface.co/google-bert/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt" ), "TurkuNLP/bert-base-finnish-cased-v1": ( "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt" ), "TurkuNLP/bert-base-finnish-uncased-v1": ( "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt" ), "wietsedv/bert-base-dutch-cased": ( "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt" ), }, "tokenizer_file": { 
"google-bert/bert-base-uncased": "https://huggingface.co/google-bert/bert-base-uncased/resolve/main/tokenizer.json", "google-bert/bert-large-uncased": "https://huggingface.co/google-bert/bert-large-uncased/resolve/main/tokenizer.json", "google-bert/bert-base-cased": "https://huggingface.co/google-bert/bert-base-cased/resolve/main/tokenizer.json", "google-bert/bert-large-cased": "https://huggingface.co/google-bert/bert-large-cased/resolve/main/tokenizer.json", "google-bert/bert-base-multilingual-uncased": ( "https://huggingface.co/google-bert/bert-base-multilingual-uncased/resolve/main/tokenizer.json" ), "google-bert/bert-base-multilingual-cased": ( "https://huggingface.co/google-bert/bert-base-multilingual-cased/resolve/main/tokenizer.json" ), "google-bert/bert-base-chinese": "https://huggingface.co/google-bert/bert-base-chinese/resolve/main/tokenizer.json", "google-bert/bert-base-german-cased": "https://huggingface.co/google-bert/bert-base-german-cased/resolve/main/tokenizer.json", "google-bert/bert-large-uncased-whole-word-masking": ( "https://huggingface.co/google-bert/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json" ), "google-bert/bert-large-cased-whole-word-masking": ( "https://huggingface.co/google-bert/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json" ), "google-bert/bert-large-uncased-whole-word-masking-finetuned-squad": ( "https://huggingface.co/google-bert/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json" ), "google-bert/bert-large-cased-whole-word-masking-finetuned-squad": ( "https://huggingface.co/google-bert/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json" ), "google-bert/bert-base-cased-finetuned-mrpc": ( "https://huggingface.co/google-bert/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json" ), "google-bert/bert-base-german-dbmdz-cased": ( "https://huggingface.co/google-bert/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json" ), "google-bert/bert-base-german-dbmdz-uncased": ( "https://huggingface.co/google-bert/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json" ), "TurkuNLP/bert-base-finnish-cased-v1": ( "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json" ), "TurkuNLP/bert-base-finnish-uncased-v1": ( "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json" ), "wietsedv/bert-base-dutch-cased": ( "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json" ), }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "google-bert/bert-base-uncased": 512, "google-bert/bert-large-uncased": 512, "google-bert/bert-base-cased": 512, "google-bert/bert-large-cased": 512, "google-bert/bert-base-multilingual-uncased": 512, "google-bert/bert-base-multilingual-cased": 512, "google-bert/bert-base-chinese": 512, "google-bert/bert-base-german-cased": 512, "google-bert/bert-large-uncased-whole-word-masking": 512, "google-bert/bert-large-cased-whole-word-masking": 512, "google-bert/bert-large-uncased-whole-word-masking-finetuned-squad": 512, "google-bert/bert-large-cased-whole-word-masking-finetuned-squad": 512, "google-bert/bert-base-cased-finetuned-mrpc": 512, "google-bert/bert-base-german-dbmdz-cased": 512, "google-bert/bert-base-german-dbmdz-uncased": 512, "TurkuNLP/bert-base-finnish-cased-v1": 512, "TurkuNLP/bert-base-finnish-uncased-v1": 512, "wietsedv/bert-base-dutch-cased": 512, } PRETRAINED_INIT_CONFIGURATION = { "google-bert/bert-base-uncased": {"do_lower_case": True}, 
"google-bert/bert-large-uncased": {"do_lower_case": True}, "google-bert/bert-base-cased": {"do_lower_case": False}, "google-bert/bert-large-cased": {"do_lower_case": False}, "google-bert/bert-base-multilingual-uncased": {"do_lower_case": True}, "google-bert/bert-base-multilingual-cased": {"do_lower_case": False}, "google-bert/bert-base-chinese": {"do_lower_case": False}, "google-bert/bert-base-german-cased": {"do_lower_case": False}, "google-bert/bert-large-uncased-whole-word-masking": {"do_lower_case": True}, "google-bert/bert-large-cased-whole-word-masking": {"do_lower_case": False}, "google-bert/bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True}, "google-bert/bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False}, "google-bert/bert-base-cased-finetuned-mrpc": {"do_lower_case": False}, "google-bert/bert-base-german-dbmdz-cased": {"do_lower_case": False}, "google-bert/bert-base-german-dbmdz-uncased": {"do_lower_case": True}, "TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False}, "TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True}, "wietsedv/bert-base-dutch-cased": {"do_lower_case": False}, } class BertTokenizerFast(PreTrainedTokenizerFast): r""" Construct a "fast" BERT tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece. This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): File containing the vocabulary. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. clean_text (`bool`, *optional*, defaults to `True`): Whether or not to clean the text before tokenization by removing any control characters and replacing all whitespaces by the classic one. tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). wordpieces_prefix (`str`, *optional*, defaults to `"##"`): The prefix for subwords. 
""" vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES slow_tokenizer_class = BertTokenizer def __init__( self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ): super().__init__( vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, ) normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__()) if ( normalizer_state.get("lowercase", do_lower_case) != do_lower_case or normalizer_state.get("strip_accents", strip_accents) != strip_accents or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars ): normalizer_class = getattr(normalizers, normalizer_state.pop("type")) normalizer_state["lowercase"] = do_lower_case normalizer_state["strip_accents"] = strip_accents normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state) self.do_lower_case = do_lower_case def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id] if token_ids_1 is not None: output += token_ids_1 + [self.sep_token_id] return output def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files)
transformers/src/transformers/models/bert/tokenization_bert_fast.py/0
{ "file_path": "transformers/src/transformers/models/bert/tokenization_bert_fast.py", "repo_id": "transformers", "token_count": 6896 }
323
# coding=utf-8 # Copyright 2018 Google AI, Google Brain and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tokenization classes for Big Bird model.""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: BigBirdTokenizer = None logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model", "google/bigbird-roberta-large": ( "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model" ), "google/bigbird-base-trivia-itc": ( "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model" ), }, "tokenizer_file": { "google/bigbird-roberta-base": ( "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json" ), "google/bigbird-roberta-large": ( "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json" ), "google/bigbird-base-trivia-itc": ( "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json" ), }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "google/bigbird-roberta-base": 4096, "google/bigbird-roberta-large": 4096, "google/bigbird-base-trivia-itc": 4096, } SPIECE_UNDERLINE = "▁" class BigBirdTokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" BigBird tokenizer (backed by HuggingFace's *tokenizers* library). Based on [Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models). This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods Args: vocab_file (`str`): [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that contains the vocabulary necessary to instantiate a tokenizer. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. .. note:: When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. 
sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES slow_tokenizer_class = BigBirdTokenizer model_input_names = ["input_ids", "attention_mask"] prefix_tokens: List[int] = [] def __init__( self, vocab_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", **kwargs, ): bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token # Mask token behave like a normal word, i.e. include the space before it mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token super().__init__( vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, ) self.vocab_file = vocab_file @property def can_save_slow_tokenizer(self) -> bool: return os.path.isfile(self.vocab_file) if self.vocab_file else False def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An BigBird sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens. 
""" sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return cls + token_ids_0 + sep return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of ids. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Set to True if the token list is already formatted with special tokens for the model Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: if token_ids_1 is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0] if token_ids_1 is None: return [1] + ([0] * len(token_ids_0)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` if token_ids_1 is None, only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of ids. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,)
transformers/src/transformers/models/big_bird/tokenization_big_bird_fast.py/0
{ "file_path": "transformers/src/transformers/models/big_bird/tokenization_big_bird_fast.py", "repo_id": "transformers", "token_count": 4758 }
324
# coding=utf-8 # Copyright 2021 The Facebook, Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Blenderbot model configuration""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging logger = logging.get_logger(__name__) BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP = { "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/config.json", # See all Blenderbot models at https://huggingface.co/models?filter=blenderbot } class BlenderbotConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`BlenderbotModel`]. It is used to instantiate an Blenderbot model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Blenderbot [facebook/blenderbot-3B](https://huggingface.co/facebook/blenderbot-3B) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50265): Vocabulary size of the Blenderbot model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`BlenderbotModel`] or [`TFBlenderbotModel`]. d_model (`int`, *optional*, defaults to 1024): Dimensionality of the layers and the pooler layer. encoder_layers (`int`, *optional*, defaults to 12): Number of encoder layers. decoder_layers (`int`, *optional*, defaults to 12): Number of decoder layers. encoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. decoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. encoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. 
activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. max_position_embeddings (`int`, *optional*, defaults to 128): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. scale_embedding (`bool`, *optional*, defaults to `False`): Scale embeddings by diving by sqrt(d_model). use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models) forced_eos_token_id (`int`, *optional*, defaults to 2): The id of the token to force as the last generated token when `max_length` is reached. Usually set to `eos_token_id`. Example: ```python >>> from transformers import BlenderbotConfig, BlenderbotModel >>> # Initializing a Blenderbot facebook/blenderbot-3B style configuration >>> configuration = BlenderbotConfig() >>> # Initializing a model (with random weights) from the facebook/blenderbot-3B style configuration >>> model = BlenderbotModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "blenderbot" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self, vocab_size=8008, max_position_embeddings=128, encoder_layers=2, encoder_ffn_dim=10240, encoder_attention_heads=32, decoder_layers=24, decoder_ffn_dim=10240, decoder_attention_heads=32, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=2560, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=1, scale_embedding=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, encoder_no_repeat_ngram_size=3, forced_eos_token_id=2, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.use_cache = use_cache self.num_hidden_layers = encoder_layers self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, encoder_no_repeat_ngram_size=encoder_no_repeat_ngram_size, forced_eos_token_id=forced_eos_token_id, **kwargs, ) class 
BlenderbotOnnxConfig(OnnxSeq2SeqConfigWithPast): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: common_inputs = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: common_inputs["decoder_input_ids"] = {0: "batch"} common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"} else: common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"} common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(common_inputs, direction="inputs") elif self.task == "causal-lm": common_inputs = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: _, num_decoder_layers = self.num_layers for i in range(num_decoder_layers): common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"} common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"} else: common_inputs = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs def outputs(self) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: common_outputs = super().outputs else: common_outputs = super(OnnxConfigWithPast, self).outputs if self.use_past: num_encoder_layers, _ = self.num_layers for i in range(num_encoder_layers): common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"} common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def _generate_dummy_inputs_for_default_and_seq2seq_lm( self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]: encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( tokenizer, batch_size, seq_length, is_pair, framework ) # Generate decoder inputs decoder_seq_length = seq_length if not self.use_past else 1 decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( tokenizer, batch_size, decoder_seq_length, is_pair, framework ) decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} common_inputs = dict(**encoder_inputs, **decoder_inputs) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.") else: import torch batch, encoder_seq_length = common_inputs["input_ids"].shape decoder_seq_length = common_inputs["decoder_input_ids"].shape[1] num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads encoder_shape = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) decoder_past_length = decoder_seq_length decoder_shape = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) common_inputs["decoder_attention_mask"] = torch.cat( [common_inputs["decoder_attention_mask"], 
torch.ones(batch, decoder_past_length)], dim=1 ) common_inputs["past_key_values"] = [] _, num_decoder_layers = self.num_layers for _ in range(num_decoder_layers): common_inputs["past_key_values"].append( ( torch.zeros(decoder_shape), torch.zeros(decoder_shape), torch.zeros(encoder_shape), torch.zeros(encoder_shape), ) ) return common_inputs def _generate_dummy_inputs_for_causal_lm( self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]: common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( tokenizer, batch_size, seq_length, is_pair, framework ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.") else: import torch batch, seqlen = common_inputs["input_ids"].shape past_key_values_length = seqlen _, num_decoder_layers = self.num_layers num_encoder_attention_heads, _ = self.num_attention_heads past_shape = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) mask_dtype = common_inputs["attention_mask"].dtype common_inputs["attention_mask"] = torch.cat( [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1 ) common_inputs["past_key_values"] = [ (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_decoder_layers) ] return common_inputs # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig._generate_dummy_inputs_for_sequence_classification_and_question_answering def _generate_dummy_inputs_for_sequence_classification_and_question_answering( self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]: # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. 
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX batch_size = compute_effective_axis_dimension( batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX token_to_add = tokenizer.num_special_tokens_to_add(is_pair) seq_length = compute_effective_axis_dimension( seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add ) # Generate dummy inputs according to compute batch and sequence dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size common_inputs = dict(tokenizer(dummy_input, return_tensors=framework)) return common_inputs # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.generate_dummy_inputs def generate_dummy_inputs( self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]: if self.task in ["default", "seq2seq-lm"]: common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm( tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework ) elif self.task == "causal-lm": common_inputs = self._generate_dummy_inputs_for_causal_lm( tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework ) else: common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework ) return common_inputs # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig._flatten_past_key_values_ def _flatten_past_key_values_(self, flattened_output, name, idx, t): if self.task in ["default", "seq2seq-lm"]: flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t) else: flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_( flattened_output, name, idx, t ) def fill_with_past_key_values_(self, inputs_or_outputs: Mapping[str, Mapping[int, str]], direction: str): if direction not in ["inputs", "outputs"]: raise ValueError(f'direction must either be "inputs" or "outputs", but {direction} was given') name = "past_key_values" if direction == "inputs" else "present" _, num_decoder_layers = self.num_layers encoder_sequence = "past_encoder_sequence" decoder_sequence = "past_decoder_sequence" if direction == "inputs" else "past_decoder_sequence + sequence" for i in range(num_decoder_layers): inputs_or_outputs[f"{name}.{i}.decoder.key"] = {0: "batch", 2: decoder_sequence} inputs_or_outputs[f"{name}.{i}.decoder.value"] = {0: "batch", 2: decoder_sequence} inputs_or_outputs[f"{name}.{i}.encoder.key"] = {0: "batch", 2: encoder_sequence} inputs_or_outputs[f"{name}.{i}.encoder.value"] = {0: "batch", 2: encoder_sequence}
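# --- Editor's usage sketch (appended; not part of the original configuration_blenderbot.py) ---
# A hedged example of the configuration class documented above. It instantiates
# a deliberately tiny config locally (much smaller than facebook/blenderbot-3B),
# so nothing is downloaded. Intended to be run as a standalone script.
if __name__ == "__main__":
    from transformers import BlenderbotConfig

    config = BlenderbotConfig(
        d_model=256,
        encoder_layers=2,
        decoder_layers=2,
        encoder_attention_heads=4,
        decoder_attention_heads=4,
        encoder_ffn_dim=512,
        decoder_ffn_dim=512,
    )

    # `attribute_map` above aliases hidden_size -> d_model and
    # num_attention_heads -> encoder_attention_heads.
    print(config.hidden_size, config.num_attention_heads)
    print(config.decoder_start_token_id, config.forced_eos_token_id)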
transformers/src/transformers/models/blenderbot/configuration_blenderbot.py/0
{ "file_path": "transformers/src/transformers/models/blenderbot/configuration_blenderbot.py", "repo_id": "transformers", "token_count": 8374 }
325
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import re import requests import torch # git clone https://github.com/salesforce/BLIP.git from models.blip import blip_decoder from models.blip_itm import blip_itm from models.blip_vqa import blip_vqa from PIL import Image from torchvision import transforms from torchvision.transforms.functional import InterpolationMode from transformers import ( BertTokenizer, BlipConfig, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, ) def load_demo_image(image_size, device): img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg" raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") transform = transforms.Compose( [ transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC), transforms.ToTensor(), transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)), ] ) image = transform(raw_image).unsqueeze(0).to(device) return image def rename_key(key): if "visual_encoder" in key: key = re.sub("visual_encoder*", "vision_model.encoder", key) if "blocks" in key: key = re.sub(r"blocks", "layers", key) if "attn" in key: key = re.sub(r"attn", "self_attn", key) if "norm1" in key: key = re.sub(r"norm1", "layer_norm1", key) if "norm2" in key: key = re.sub(r"norm2", "layer_norm2", key) if "encoder.norm" in key: key = re.sub(r"encoder.norm", "post_layernorm", key) if "encoder.patch_embed.proj" in key: key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key) if "encoder.pos_embed" in key: key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key) if "encoder.cls_token" in key: key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key) if "self_attn" in key: key = re.sub(r"self_attn.proj", "self_attn.projection", key) return key @torch.no_grad() def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None): """ Copy/paste/tweak model's weights to transformers design. 
""" if config_path is not None: config = BlipConfig.from_pretrained(config_path) else: config = BlipConfig(projection_dim=512, text_config={}, vision_config={}) hf_model = BlipForConditionalGeneration(config).eval() model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth" pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base") pt_model = pt_model.eval() modified_state_dict = pt_model.state_dict() for key in modified_state_dict.copy(): value = modified_state_dict.pop(key) renamed_key = rename_key(key) modified_state_dict[renamed_key] = value hf_model.load_state_dict(modified_state_dict) image_size = 384 image = load_demo_image(image_size=image_size, device="cpu") tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased") input_ids = tokenizer(["a picture of"]).input_ids out = hf_model.generate(image, input_ids) assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102] out = hf_model.generate(image) assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102] if pytorch_dump_folder_path is not None: hf_model.save_pretrained(pytorch_dump_folder_path) # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth' model_url = ( "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth" ) vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base") vqa_model.eval() modified_state_dict = vqa_model.state_dict() for key in modified_state_dict.copy(): value = modified_state_dict.pop(key) renamed_key = rename_key(key) modified_state_dict[renamed_key] = value hf_vqa_model = BlipForQuestionAnswering(config) hf_vqa_model.load_state_dict(modified_state_dict) question = ["How many dogs are in this image?"] question_input_ids = tokenizer(question, return_tensors="pt").input_ids answer = hf_vqa_model.generate(question_input_ids, image) print(tokenizer.decode(answer[0])) assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]" if pytorch_dump_folder_path is not None: hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa") model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth" itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base") itm_model.eval() modified_state_dict = itm_model.state_dict() for key in modified_state_dict.copy(): value = modified_state_dict.pop(key) renamed_key = rename_key(key) modified_state_dict[renamed_key] = value hf_itm_model = BlipForImageTextRetrieval(config) question = ["A picture of a woman with a dog sitting in a beach"] question_input_ids = tokenizer( question, return_tensors="pt", padding="max_length", truncation=True, max_length=35, ).input_ids hf_itm_model.load_state_dict(modified_state_dict) hf_itm_model.eval() out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True) out = hf_itm_model(question_input_ids, image, use_itm_head=False) assert out[0].item() == 0.2110687494277954 assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127 if pytorch_dump_folder_path is not None: hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--config_path", default=None, type=str, 
help="Path to hf config.json of model to convert") args = parser.parse_args() convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
transformers/src/transformers/models/blip/convert_blip_original_pytorch_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/blip/convert_blip_original_pytorch_to_hf.py", "repo_id": "transformers", "token_count": 2808 }
326
# coding=utf-8 # Copyright 2023 HuggingFace Inc. Team and Bigscience Workshop. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Flax BLOOM model.""" import math from functools import partial from typing import Optional, Tuple import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen import combine_masks, dot_product_attention_weights, make_causal_mask from flax.linen.activation import tanh from flax.traverse_util import flatten_dict, unflatten_dict from jax import lax from ...modeling_flax_outputs import ( FlaxBaseModelOutput, FlaxBaseModelOutputWithPastAndCrossAttentions, FlaxCausalLMOutput, ) from ...modeling_flax_utils import FlaxPreTrainedModel, append_call_sample_docstring from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_bloom import BloomConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "bigscience/bloom" _CONFIG_FOR_DOC = "BloomConfig" BLOOM_START_DOCSTRING = r""" This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a Flax Linen [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior. Finally, this model supports inherent JAX features such as: - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) Parameters: config ([`BloomConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights. dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and `jax.numpy.bfloat16` (on TPUs). This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given `dtype`. **Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.** If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and [`~FlaxPreTrainedModel.to_bf16`]. 
""" BLOOM_INPUTS_DOCSTRING = r""" Args: input_ids (`numpy.ndarray` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length`. Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`BloomTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`): Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ def build_alibi_tensor(attention_mask: jnp.ndarray, num_heads: int, dtype: Optional[jnp.dtype] = jnp.float32): """ Flax implementation of the BLOOM Alibi tensor. BLOOM Alibi tensor is not causal as the original paper mentions, it relies on a translation invariance of softmax for quick implementation: with l being a tensor, and a fixed value `softmax(l+a) = softmax(l)`. Based on https://github.com/ofirpress/attention_with_linear_biases/blob/a35aaca144e0eb6b789dfcb46784c4b8e31b7983/fairseq/models/transformer.py#L742 Link to paper: https://arxiv.org/abs/2108.12409 Args: attention_mask (`jnp.ndarray`): Token-wise attention mask, this should be of shape `(batch_size, max_seq_len)`. num_heads (`int`): Number of attention heads. dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`): The data type (dtype) of the output tensor. Returns: Alibi tensor of shape `(batch_size * num_heads, 1, max_seq_len)`. """ batch_size, seq_length = attention_mask.shape closest_power_of_2 = 2 ** math.floor(math.log2(num_heads)) base = jnp.array(2 ** (-(2 ** -(math.log2(closest_power_of_2) - 3))), dtype=jnp.float32) powers = jnp.arange(1, 1 + closest_power_of_2, dtype=jnp.float32) slopes = jax.lax.pow(base, powers) if closest_power_of_2 != num_heads: extra_base = jnp.array(2 ** (-(2 ** -(math.log2(2 * closest_power_of_2) - 3))), dtype=jnp.float32) num_remaining_heads = min(closest_power_of_2, num_heads - closest_power_of_2) extra_powers = jnp.arange(1, 1 + 2 * num_remaining_heads, 2, dtype=jnp.float32) slopes = jnp.cat([slopes, jax.lax.pow(extra_base, extra_powers)], axis=0) # Note: the Alibi tensor will added to the attention bias that will be applied to the query, key product of attention # therefore, Alibi will have to be of shape (batch_size, num_heads, query_length, key_length) # => here we set (batch_size=1, num_heads=num_heads, query_length=1, key_length=max_length) # so that the query_length dimension will then be broadcast correctly. 
# This is more or less identical to T5's relative position bias: # https://github.com/huggingface/transformers/blob/f681437203baa7671de3174b0fa583c349d9d5e1/src/transformers/models/t5/modeling_t5.py#L527 arange_tensor = ((attention_mask.cumsum(axis=-1) - 1) * attention_mask)[:, None, :] alibi = slopes[..., None] * arange_tensor alibi = jnp.expand_dims(alibi, axis=2) return jnp.asarray(alibi, dtype) class FlaxBloomAttention(nn.Module): config: BloomConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.hidden_size = self.config.hidden_size self.num_heads = self.config.n_head self.head_dim = self.hidden_size // self.num_heads self.attention_softmax_in_fp32 = self.dtype is not jnp.float32 if self.head_dim * self.num_heads != self.hidden_size: raise ValueError( f"`hidden_size` must be divisible by `num_heads` (got `hidden_size`: {self.hidden_size} and " f"`num_heads`: {self.num_heads})." ) dense = partial( nn.Dense, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.query_key_value = dense(self.hidden_size * 3) self.dense = dense(self.hidden_size) self.resid_dropout = nn.Dropout(rate=self.config.hidden_dropout) def _split_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:-1] + (self.num_heads, self.head_dim * 3)) def _merge_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.hidden_size,)) @nn.compact # Copied from transformers.models.gptj.modeling_flax_gptj.FlaxGPTJAttention._concatenate_to_cache def _concatenate_to_cache(self, key, value, query, attention_mask): """ This function takes projected key, value states from a single input token and concatenates the states to cached states from previous steps. This function is slighly adapted from the official Flax repository: https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252 """ # detect if we're initializing by absence of existing cache data. is_initialized = self.has_variable("cache", "cached_key") cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype) cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype) cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32)) if is_initialized: *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape # update key, value caches with our new 1d spatial slices cur_index = cache_index.value indices = (0,) * len(batch_dims) + (cur_index, 0, 0) key = lax.dynamic_update_slice(cached_key.value, key, indices) value = lax.dynamic_update_slice(cached_value.value, value, indices) cached_key.value = key cached_value.value = value num_updated_cache_vectors = query.shape[1] cache_index.value = cache_index.value + num_updated_cache_vectors # causal mask for cached decoder self-attention: our single query position should only attend to those key # positions that have already been generated and cached, not the remaining zero elements. 
pad_mask = jnp.broadcast_to( jnp.arange(max_length) < cur_index + num_updated_cache_vectors, tuple(batch_dims) + (1, num_updated_cache_vectors, max_length), ) attention_mask = combine_masks(pad_mask, attention_mask) return key, value, attention_mask def __call__( self, hidden_states, residual, alibi, attention_mask=None, deterministic: bool = True, init_cache: bool = False, output_attentions: bool = False, ): batch_size, seq_length = hidden_states.shape[:2] # proj q, k, v fused_qkv = self.query_key_value(hidden_states) fused_qkv = self._split_heads(fused_qkv) query, key, value = jnp.split(fused_qkv, 3, axis=-1) causal_attention_mask = make_causal_mask(attention_mask, dtype="bool") # for fast decoding causal attention mask should be shifted causal_attention_mask_shift = ( self.variables["cache"]["cache_index"] if self.has_variable("cache", "cached_key") else 0 ) # fast decoding for generate requires special attention_mask if self.has_variable("cache", "cached_key"): max_decoder_length = self.variables["cache"]["cached_key"].shape[1] causal_attention_mask = jax.lax.dynamic_slice( causal_attention_mask, (0, 0, causal_attention_mask_shift, 0), (1, 1, seq_length, max_decoder_length), ) # broadcast causal attention mask & attention mask to fit for merge causal_attention_mask = jnp.broadcast_to( causal_attention_mask, (batch_size,) + causal_attention_mask.shape[1:] ) attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_attention_mask.shape) attention_mask = combine_masks(attention_mask, causal_attention_mask) dropout_rng = None if not deterministic and self.config.attention_dropout > 0.0: dropout_rng = self.make_rng("dropout") # During fast autoregressive decoding, we feed one position at a time, # and cache the keys and values step by step. 
if self.has_variable("cache", "cached_key") or init_cache: key, value, attention_mask = self._concatenate_to_cache(key, value, query, attention_mask) # transform boolean mask into float mask mask_value = jnp.finfo(self.dtype).min attention_bias = lax.select( attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, mask_value).astype(self.dtype), ) attention_bias = attention_bias + alibi # Cast in fp32 if the original dtype is different from fp32 attention_dtype = jnp.float32 if self.attention_softmax_in_fp32 else self.dtype attn_weights = dot_product_attention_weights( query, key, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.config.attention_dropout, deterministic=deterministic, dtype=attention_dtype, ) # Cast back in the original dtype if the native dtype is not fp32 if self.attention_softmax_in_fp32: attn_weights = attn_weights.astype(self.dtype) attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value) attn_output = self._merge_heads(attn_output) attn_output = self.dense(attn_output) attn_output = self.resid_dropout(attn_output, deterministic=deterministic) attn_output = attn_output + residual outputs = (attn_output, attn_weights) if output_attentions else (attn_output,) return outputs class BloomGELU(nn.Module): def setup(self): self.dtype = jnp.float32 def __call__(self, x): return x * 0.5 * (1.0 + tanh(0.79788456 * x * (1 + 0.044715 * x * x))) class FlaxBloomMLP(nn.Module): config: BloomConfig dtype: jnp.dtype = jnp.float32 def setup(self): hidden_size = self.config.hidden_size kernel_init = jax.nn.initializers.normal(self.config.initializer_range) self.dense_h_to_4h = nn.Dense(4 * hidden_size, dtype=self.dtype, kernel_init=kernel_init) self.dense_4h_to_h = nn.Dense(hidden_size, dtype=self.dtype, kernel_init=kernel_init) self.hidden_dropout = nn.Dropout(self.config.hidden_dropout) self.act = BloomGELU() def __call__(self, hidden_states, residual, deterministic: bool = True): hidden_states = self.dense_h_to_4h(hidden_states) hidden_states = self.act(hidden_states) intermediate_output = self.dense_4h_to_h(hidden_states) intermediate_output = intermediate_output + residual hidden_states = self.hidden_dropout(intermediate_output, deterministic=deterministic) return hidden_states class FlaxBloomBlock(nn.Module): config: BloomConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.input_layernorm = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype) self.self_attention = FlaxBloomAttention(self.config, dtype=self.dtype) self.post_attention_layernorm = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype) self.mlp = FlaxBloomMLP(self.config, dtype=self.dtype) self.apply_residual_connection_post_layernorm = self.config.apply_residual_connection_post_layernorm self.hidden_dropout = self.config.hidden_dropout def __call__( self, hidden_states, alibi, attention_mask=None, deterministic: bool = True, init_cache: bool = False, output_attentions: bool = False, ): layernorm_output = self.input_layernorm(hidden_states) # layer norm before saving residual if config calls for it if self.apply_residual_connection_post_layernorm: residual = layernorm_output else: residual = hidden_states # self-attention attn_outputs = self.self_attention( layernorm_output, residual=residual, alibi=alibi, attention_mask=attention_mask, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions, ) attention_output = attn_outputs[0] outputs = attn_outputs[1:] post_layernorm 
= self.post_attention_layernorm(attention_output) # set residual based on config if self.apply_residual_connection_post_layernorm: residual = post_layernorm else: residual = attention_output output = self.mlp(post_layernorm, residual, deterministic=deterministic) outputs = (output,) + outputs return outputs class FlaxBloomPreTrainedModel(FlaxPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = BloomConfig base_model_prefix = "transformer" module_class: nn.Module = None def __init__( self, config: BloomConfig, input_shape: Tuple = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, **kwargs, ): module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: # init input tensors input_ids = jnp.zeros(input_shape, dtype="i4") attention_mask = jnp.ones_like(input_ids) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} random_params = self.module.init(rngs, input_ids, attention_mask, return_dict=False)["params"] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params def init_cache(self, batch_size, max_length): r""" Args: batch_size (`int`): batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache. max_length (`int`): maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized cache. """ # init input variables to retrieve cache input_ids = jnp.ones((batch_size, max_length), dtype="i4") attention_mask = jnp.ones_like(input_ids) init_variables = self.module.init( jax.random.PRNGKey(0), input_ids, attention_mask, return_dict=False, init_cache=True ) return unfreeze(init_variables["cache"]) @add_start_docstrings_to_model_forward(BLOOM_INPUTS_DOCSTRING) def __call__( self, input_ids, attention_mask=None, past_key_values: dict = None, params: dict = None, dropout_rng: jax.random.PRNGKey = None, train: bool = False, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict batch_size, sequence_length = input_ids.shape if attention_mask is None: attention_mask = jnp.ones((batch_size, sequence_length)) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng inputs = {"params": params or self.params} # If past_key_values are passed then cache is already initialized a private flag init_cache has to be passed # down to ensure cache is used. 
It has to be made sure that cache is marked as mutable so that it can be # changed by FlaxBloomAttention module if past_key_values: inputs["cache"] = past_key_values mutable = ["cache"] else: mutable = False outputs = self.module.apply( inputs, jnp.array(input_ids, dtype="i4"), jnp.array(attention_mask, dtype="i4"), not train, False, output_attentions, output_hidden_states, return_dict, rngs=rngs, mutable=mutable, ) # add updated cache to model output if past_key_values is not None and return_dict: outputs, past_key_values = outputs outputs["past_key_values"] = unfreeze(past_key_values["cache"]) return outputs elif past_key_values is not None and not return_dict: outputs, past_key_values = outputs outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:] return outputs class FlaxBloomBlockCollection(nn.Module): config: BloomConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.layers = [ FlaxBloomBlock(self.config, name=str(layer_number), dtype=self.dtype) for layer_number in range(self.config.num_hidden_layers) ] def __call__( self, hidden_states, alibi, attention_mask=None, deterministic: bool = True, init_cache: bool = False, output_attentions: bool = False, output_hidden_states: bool = False, ): all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for layer_number in range(self.config.num_hidden_layers): if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = self.layers[layer_number]( hidden_states, alibi=alibi, attention_mask=attention_mask, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions += (layer_outputs[1],) # this contains possible `None` values - `FlaxBloomModule` will filter them out outputs = (hidden_states, all_hidden_states, all_attentions) return outputs class FlaxBloomModule(nn.Module): config: BloomConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.embed_dim = self.config.hidden_size # word embeddings (no positional embedding layer) self.word_embeddings = nn.Embed( self.config.vocab_size, self.embed_dim, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), dtype=self.dtype, ) # post-embedding layernorm self.word_embeddings_layernorm = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype) # transformer layers self.h = FlaxBloomBlockCollection(self.config, dtype=self.dtype) # final layernorm self.ln_f = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype) def __call__( self, input_ids=None, attention_mask=None, deterministic=True, init_cache: bool = False, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): inputs_embeds = self.word_embeddings(input_ids) # do post-embedding layernorm hidden_states = self.word_embeddings_layernorm(inputs_embeds) # build alibi depending on `attention_mask` alibi = build_alibi_tensor(attention_mask, self.config.n_head, dtype=hidden_states.dtype) outputs = self.h( hidden_states, alibi=alibi, attention_mask=attention_mask, deterministic=deterministic, init_cache=init_cache, output_hidden_states=output_hidden_states, output_attentions=output_attentions, ) hidden_states = outputs[0] hidden_states = self.ln_f(hidden_states) if output_hidden_states: all_hidden_states = outputs[1] + (hidden_states,) outputs = (hidden_states, all_hidden_states) + outputs[2:] else: outputs = (hidden_states,) + outputs[1:] if not return_dict: 
return tuple(v for v in [outputs[0], outputs[-1]] if v is not None) return FlaxBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, hidden_states=outputs[1], attentions=outputs[-1], ) @add_start_docstrings( "The bare Bloom Model transformer outputting raw hidden-states without any specific head on top.", BLOOM_START_DOCSTRING, ) # Copied from transformers.models.gpt_neo.modeling_flax_gpt_neo.FlaxGPTNeoModel with GPTNeo->Bloom class FlaxBloomModel(FlaxBloomPreTrainedModel): module_class = FlaxBloomModule append_call_sample_docstring(FlaxBloomModel, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutput, _CONFIG_FOR_DOC) class FlaxBloomForCausalLMModule(nn.Module): config: BloomConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.transformer = FlaxBloomModule(self.config, dtype=self.dtype) self.lm_head = nn.Dense( self.config.vocab_size, use_bias=False, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), ) def __call__( self, input_ids, attention_mask, deterministic: bool = True, init_cache: bool = False, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): outputs = self.transformer( input_ids, attention_mask=attention_mask, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_kernel = self.transformer.variables["params"]["word_embeddings"]["embedding"].T lm_logits = self.lm_head.apply({"params": {"kernel": shared_kernel}}, hidden_states) else: lm_logits = self.lm_head(hidden_states) if not return_dict: return (lm_logits,) + outputs[1:] return FlaxCausalLMOutput(logits=lm_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings( """ The Bloom Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). """, BLOOM_START_DOCSTRING, ) class FlaxBloomForCausalLM(FlaxBloomPreTrainedModel): module_class = FlaxBloomForCausalLMModule def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None): # initializing the cache batch_size, seq_length = input_ids.shape past_key_values = self.init_cache(batch_size, max_length) # Note that usually one would have to put 0's in the attention_mask for # x > input_ids.shape[-1] and x < cache_length. But since Bloom uses a causal mask, # those positions are masked anyway. Thus, we can create a single static attention_mask here, # which is more efficient for compilation extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4") if attention_mask is not None: extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0)) return { "past_key_values": past_key_values, "attention_mask": extended_attention_mask, } def update_inputs_for_generation(self, model_outputs, model_kwargs): model_kwargs["past_key_values"] = model_outputs.past_key_values return model_kwargs append_call_sample_docstring(FlaxBloomForCausalLM, _CHECKPOINT_FOR_DOC, FlaxCausalLMOutput, _CONFIG_FOR_DOC)
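# Illustrative sketch (added for this document, not part of the upstream module): a quick sanity
# check of `build_alibi_tensor` defined above. The batch size, sequence length and head count
# below are arbitrary assumptions chosen only to show the expected output shape.
if __name__ == "__main__":
    example_mask = jnp.ones((2, 5), dtype=jnp.int32)  # (batch_size=2, seq_len=5), no padding
    example_alibi = build_alibi_tensor(example_mask, num_heads=8, dtype=jnp.float32)
    # one bias value per (batch, head, key position), broadcast over query positions inside attention
    print(example_alibi.shape)  # (2, 8, 1, 5)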
transformers/src/transformers/models/bloom/modeling_flax_bloom.py/0
{ "file_path": "transformers/src/transformers/models/bloom/modeling_flax_bloom.py", "repo_id": "transformers", "token_count": 12766 }
327
# coding=utf-8 # Copyright 2021 The Open AI Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for CLIP.""" import json import os import unicodedata from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = { "vocab_file": "vocab.json", "merges_file": "merges.txt", } PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "openai/clip-vit-base-patch32": "https://huggingface.co/openai/clip-vit-base-patch32/resolve/main/vocab.json", }, "merges_file": { "openai/clip-vit-base-patch32": "https://huggingface.co/openai/clip-vit-base-patch32/resolve/main/merges.txt", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "openai/clip-vit-base-patch32": 77, } PRETRAINED_INIT_CONFIGURATION = { "openai/clip-vit-base-patch32": {}, } @lru_cache() def bytes_to_unicode(): """ Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoids mapping to whitespace/control characters the bpe code barfs on. The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings. """ bs = ( list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1)) ) cs = bs[:] n = 0 for b in range(2**8): if b not in bs: bs.append(b) cs.append(2**8 + n) n += 1 cs = [chr(n) for n in cs] return dict(zip(bs, cs)) def get_pairs(word): """ Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length strings). """ pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char return pairs def whitespace_clean(text): text = re.sub(r"\s+", " ", text) text = text.strip() return text # Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize def whitespace_tokenize(text): """Runs basic whitespace cleaning and splitting on a piece of text.""" text = text.strip() if not text: return [] tokens = text.split() return tokens # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer class BasicTokenizer(object): """ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). Args: do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. 
Only has an effect when `do_basic_tokenize=True` tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). do_split_on_punc (`bool`, *optional*, defaults to `True`): In some instances we want to skip the basic punctuation splitting so that later tokenization can capture the full context of the words, such as contractions. """ def __init__( self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None, do_split_on_punc=True, ): if never_split is None: never_split = [] self.do_lower_case = do_lower_case self.never_split = set(never_split) self.tokenize_chinese_chars = tokenize_chinese_chars self.strip_accents = strip_accents self.do_split_on_punc = do_split_on_punc def tokenize(self, text, never_split=None): """ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer. Args: never_split (`List[str]`, *optional*) Kept for backward compatibility purposes. Now implemented directly at the base class level (see [`PreTrainedTokenizer.tokenize`]) List of token not to split. """ # union() returns a new set by concatenating the two sets. never_split = self.never_split.union(set(never_split)) if never_split else self.never_split text = self._clean_text(text) # This was added on November 1st, 2018 for the multilingual and Chinese # models. This is also applied to the English models now, but it doesn't # matter since the English models were not trained on any Chinese data # and generally don't have any Chinese data in them (there are Chinese # characters in the vocabulary because Wikipedia does have some Chinese # words in the English Wikipedia.). 
if self.tokenize_chinese_chars: text = self._tokenize_chinese_chars(text) # prevents treating the same character with different unicode codepoints as different characters unicode_normalized_text = unicodedata.normalize("NFC", text) orig_tokens = whitespace_tokenize(unicode_normalized_text) split_tokens = [] for token in orig_tokens: if token not in never_split: if self.do_lower_case: token = token.lower() if self.strip_accents is not False: token = self._run_strip_accents(token) elif self.strip_accents: token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token, never_split)) output_tokens = whitespace_tokenize(" ".join(split_tokens)) return output_tokens def _run_strip_accents(self, text): """Strips accents from a piece of text.""" text = unicodedata.normalize("NFD", text) output = [] for char in text: cat = unicodedata.category(char) if cat == "Mn": continue output.append(char) return "".join(output) def _run_split_on_punc(self, text, never_split=None): """Splits punctuation on a piece of text.""" if not self.do_split_on_punc or (never_split is not None and text in never_split): return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return ["".join(x) for x in output] def _tokenize_chinese_chars(self, text): """Adds whitespace around any CJK character.""" output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(" ") output.append(char) output.append(" ") else: output.append(char) return "".join(output) def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF) # or (cp >= 0x20000 and cp <= 0x2A6DF) # or (cp >= 0x2A700 and cp <= 0x2B73F) # or (cp >= 0x2B740 and cp <= 0x2B81F) # or (cp >= 0x2B820 and cp <= 0x2CEAF) # or (cp >= 0xF900 and cp <= 0xFAFF) or (cp >= 0x2F800 and cp <= 0x2FA1F) # ): # return True return False def _clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xFFFD or _is_control(char): continue if _is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output) class CLIPTokenizer(PreTrainedTokenizer): """ Construct a CLIP tokenizer. Based on byte-level Byte-Pair-Encoding. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. merges_file (`str`): Path to the merges file. errors (`str`, *optional*, defaults to `"replace"`): Paradigm to follow when decoding bytes to UTF-8. See [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. 
unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. bos_token (`str`, *optional*, defaults to `"<|startoftext|>"`): The beginning of sequence token. eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`): The end of sequence token. pad_token (`str`, *optional*, defaults to `"<|endoftext|>"`): The token used for padding, for example when batching sequences of different lengths. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file, merges_file, errors="replace", unk_token="<|endoftext|>", bos_token="<|startoftext|>", eos_token="<|endoftext|>", pad_token="<|endoftext|>", # hack to enable padding **kwargs, ): bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token try: import ftfy self.fix_text = ftfy.fix_text except ImportError: logger.info("ftfy or spacy is not installed using custom BasicTokenizer instead of ftfy.") self.nlp = BasicTokenizer(strip_accents=False, do_split_on_punc=False) self.fix_text = None with open(vocab_file, encoding="utf-8") as vocab_handle: self.encoder = json.load(vocab_handle) self.decoder = {v: k for k, v in self.encoder.items()} self.errors = errors # how to handle errors in decoding self.byte_encoder = bytes_to_unicode() self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} with open(merges_file, encoding="utf-8") as merges_handle: bpe_merges = merges_handle.read().strip().split("\n")[1 : 49152 - 256 - 2 + 1] bpe_merges = [tuple(merge.split()) for merge in bpe_merges] self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) self.cache = {"<|startoftext|>": "<|startoftext|>", "<|endoftext|>": "<|endoftext|>"} self.pat = re.compile( r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE, ) super().__init__( errors=errors, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs, ) @property def vocab_size(self): return len(self.encoder) def get_vocab(self): return dict(self.encoder, **self.added_tokens_encoder) def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A CLIP sequence has the following format: - single sequence: `<|startoftext|> X <|endoftext|>` Pairs of sequences are not the expected use case, but they will be handled without a separator. Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. 
""" bos_token = [self.bos_token_id] eos_token = [self.eos_token_id] if token_ids_1 is None: return bos_token + token_ids_0 + eos_token return bos_token + token_ids_0 + eos_token + eos_token + token_ids_1 + eos_token def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is None: return [1] + ([0] * len(token_ids_0)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] + [1] + ([0] * len(token_ids_1)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed. CLIP does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. """ bos_token = [self.bos_token_id] eos_token = [self.eos_token_id] if token_ids_1 is None: return len(bos_token + token_ids_0 + eos_token) * [0] return len(bos_token + token_ids_0 + eos_token + eos_token + token_ids_1 + eos_token) * [0] def bpe(self, token): if token in self.cache: return self.cache[token] word = tuple(token[:-1]) + (token[-1] + "</w>",) pairs = get_pairs(word) if not pairs: return token + "</w>" while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) i = j if word[i] == first and i < len(word) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = " ".join(word) self.cache[token] = word return word def _tokenize(self, text): """Tokenize a string.""" bpe_tokens = [] if self.fix_text is None: text = " ".join(self.nlp.tokenize(text)) else: text = whitespace_clean(self.fix_text(text)).lower() for token in re.findall(self.pat, text): token = "".join( self.byte_encoder[b] for b in token.encode("utf-8") ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" ")) return bpe_tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.decoder.get(index) def convert_tokens_to_string(self, tokens): 
"""Converts a sequence of tokens (string) in a single string.""" text = "".join(tokens) byte_array = bytearray([self.byte_decoder[c] for c in text]) text = byte_array.decode("utf-8", errors=self.errors).replace("</w>", " ").strip() return text def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error("Vocabulary path ({}) should be a directory".format(save_directory)) return vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) merge_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") index = 0 with open(merge_file, "w", encoding="utf-8") as writer: writer.write("#version: 0.2\n") for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( "Saving vocabulary to {}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!".format(merge_file) ) index = token_index writer.write(" ".join(bpe_tokens) + "\n") index += 1 return vocab_file, merge_file
transformers/src/transformers/models/clip/tokenization_clip.py/0
{ "file_path": "transformers/src/transformers/models/clip/tokenization_clip.py", "repo_id": "transformers", "token_count": 9519 }
328
# coding=utf-8 # Copyright 2023 MetaAI and the HuggingFace Inc. team. All rights reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for Code LLaMA.""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...convert_slow_tokenizer import import_protobuf from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging, requires_backends logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "hf-internal-testing/llama-code-tokenizer": "https://huggingface.co/hf-internal-testing/llama-tokenizer/resolve/main/tokenizer.model", }, "tokenizer_file": { "hf-internal-testing/llama-code-tokenizer": "https://huggingface.co/hf-internal-testing/llama-tokenizer/resolve/main/tokenizer_config.json", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "hf-internal-testing/llama-code-tokenizer": 2048, } SPIECE_UNDERLINE = "▁" B_INST, E_INST = "[INST]", "[/INST]" B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n" # fmt: off DEFAULT_SYSTEM_PROMPT = """You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your \ answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure\ that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not \ correct. If you don't know the answer to a question, please don't share false information.""" # fmt: on class CodeLlamaTokenizer(PreTrainedTokenizer): """ Construct a CodeLlama tokenizer. Based on byte-level Byte-Pair-Encoding. The default padding token is unset as there is no padding token in the original model. The default configuration match that of [codellama/CodeLlama-7b-Instruct-hf](https://huggingface.co/codellama/CodeLlama-7b-Instruct-hf/blob/main/tokenizer_config.json) which supports prompt infilling. Args: vocab_file (`str`): Path to the vocabulary file. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> prefix_token (`str`, *optional*, defaults to `"▁<PRE>"`): Prefix token used for infilling. middle_token (`str`, *optional*, defaults to `"▁<MID>"`): Middle token used for infilling. suffix_token (`str`, *optional*, defaults to `"▁<SUF>"`): Suffix token used for infilling. 
eot_token (`str`, *optional*, defaults to `"▁<EOT>"`): End of text token used for infilling. fill_token (`str`, *optional*, defaults to `"<FILL_ME>"`): The token used to split the input between the prefix and suffix. suffix_first (`bool`, *optional*, defaults to `False`): Whether the input prompt and suffix should be formatted with the suffix first. sp_model_kwargs (`dict`, *optional*): Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set: - `enable_sampling`: Enable subword regularization. - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - `nbest_size = {0,1}`: No sampling is performed. - `nbest_size > 1`: samples from the nbest_size results. - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm. - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout. add_bos_token (`bool`, *optional*, defaults to `True`): Whether to add a beginning of sequence token at the start of sequences. add_eos_token (`bool`, *optional*, defaults to `False`): Whether to add an end of sequence token at the end of sequences. clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`): Whether or not to clean up the tokenization spaces. additional_special_tokens (`List[str]`, *optional*): Additional special tokens used by the tokenizer. use_default_system_prompt (`bool`, *optional*, defaults to `False`): Whether or not the default system prompt for Llama should be used. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file, unk_token="<unk>", bos_token="<s>", eos_token="</s>", prefix_token="▁<PRE>", middle_token="▁<MID>", suffix_token="▁<SUF>", eot_token="▁<EOT>", fill_token="<FILL_ME>", suffix_first=False, sp_model_kwargs: Optional[Dict[str, Any]] = None, add_bos_token=True, add_eos_token=False, clean_up_tokenization_spaces=False, additional_special_tokens=None, use_default_system_prompt=False, **kwargs, ): requires_backends(self, "protobuf") self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs bos_token = AddedToken(bos_token, normalized=False, special=True) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, normalized=False, special=True) if isinstance(eos_token, str) else eos_token unk_token = AddedToken(unk_token, normalized=False, special=True) if isinstance(unk_token, str) else unk_token self.use_default_system_prompt = use_default_system_prompt # mark tokens special to skip them additional_special_tokens = additional_special_tokens or [] for token in [prefix_token, middle_token, suffix_token, eot_token]: additional_special_tokens += [token] if token is not None else [] self.vocab_file = vocab_file self.add_bos_token = add_bos_token self.add_eos_token = add_eos_token self._prefix_token = prefix_token self._middle_token = middle_token self._suffix_token = suffix_token self._eot_token = eot_token self.fill_token = fill_token self.suffix_first = suffix_first self.sp_model = self.get_spm_processor() super().__init__( bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, add_bos_token=add_bos_token, add_eos_token=add_eos_token, 
prefix_token=prefix_token, middle_token=middle_token, suffix_token=suffix_token, eot_token=eot_token, fill_token=fill_token, sp_model_kwargs=self.sp_model_kwargs, suffix_first=suffix_first, clean_up_tokenization_spaces=clean_up_tokenization_spaces, additional_special_tokens=additional_special_tokens, use_default_system_prompt=use_default_system_prompt, **kwargs, ) @property def unk_token_length(self): return len(self.sp_model.encode(str(self.unk_token))) def get_spm_processor(self): tokenizer = spm.SentencePieceProcessor(**self.sp_model_kwargs) with open(self.vocab_file, "rb") as f: sp_model = f.read() model_pb2 = import_protobuf() model = model_pb2.ModelProto.FromString(sp_model) normalizer_spec = model_pb2.NormalizerSpec() normalizer_spec.add_dummy_prefix = False model.normalizer_spec.MergeFrom(normalizer_spec) sp_model = model.SerializeToString() tokenizer.LoadFromSerializedProto(sp_model) return tokenizer @property def prefix_token(self): return self._prefix_token @property def prefix_id(self): if self._prefix_token is None: return None return self.convert_tokens_to_ids(self.prefix_token) @property def middle_token(self): return self._middle_token @property def middle_id(self): if self._middle_token is None: return None return self.convert_tokens_to_ids(self.middle_token) @property def suffix_token(self): return self._suffix_token @property def suffix_id(self): if self._suffix_token is None: return None return self.convert_tokens_to_ids(self.suffix_token) @property def eot_token(self): return self._eot_token @property def eot_id(self): if self._eot_token is None: return None return self.convert_tokens_to_ids(self.eot_token) @property def vocab_size(self): """Returns vocab size""" return self.sp_model.get_piece_size() # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.get_vocab def get_vocab(self): """Returns vocab as a dict""" vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def tokenize(self, prefix, suffix=None, suffix_first=False, **kwargs) -> List[int]: # add a prefix space to `prefix` if self.fill_token is not None and self.fill_token in prefix and suffix is None: prefix, suffix = prefix.split(self.fill_token) if len(prefix) > 0: prefix = SPIECE_UNDERLINE + prefix.replace(SPIECE_UNDERLINE, " ") if suffix is None or len(suffix) < 1: tokens = super().tokenize(prefix, **kwargs) if len(tokens) > 1 and tokens[0] == SPIECE_UNDERLINE and tokens[1] in self.all_special_tokens: tokens = tokens[1:] return tokens prefix_tokens = self._tokenize(prefix) # prefix has an extra `SPIECE_UNDERLINE` if None in (self.prefix_id, self.middle_id, self.suffix_id): raise ValueError( "The input either includes a `prefix` and a `suffix` used for the infilling task," f" or can be split on the {self.fill_token} token, creating a suffix and prefix," " but the model does not support `infilling`." ) suffix_tokens = self._tokenize(suffix) # make sure CodeLlama sp model does not mess up suffix_first = suffix_first if suffix_first is not None else self.suffix_first if suffix_first: # format as " <PRE> <SUF>{suf} <MID> {pre}" return [self.prefix_token, self.suffix_token] + suffix_tokens + [self.middle_token] + prefix_tokens else: # format as " <PRE> {pre} <SUF>{suf} <MID>" return [self.prefix_token] + prefix_tokens + [self.suffix_token] + suffix_tokens + [self.middle_token] def _tokenize(self, text, **kwargs): """ Returns a tokenized string. 
We de-activated the `add_dummy_prefix` option, thus the sentencepiece internals will always strip any SPIECE_UNDERLINE. For example: `self.sp_model.encode(f"{SPIECE_UNDERLINE}Hey", out_type = str)` will give `['H', 'e', 'y']` instead of `['▁He', 'y']`. Thus we always encode `f"{unk_token}text"` and strip the `unk_token`. Here is an example with `unk_token = "<unk>"` and `unk_token_length = 4`. `self.tokenizer.sp_model.encode("<unk> Hey", out_type = str)[4:]`. """ tokens = self.sp_model.encode(text, out_type=str) if not text.startswith((SPIECE_UNDERLINE, " ")): return tokens # 1. Encode string + prefix ex: "<unk> Hey" tokens = self.sp_model.encode(self.unk_token + text, out_type=str) # 2. Remove self.unk_token from ['<','unk','>', '▁Hey'] return tokens[self.unk_token_length :] if len(tokens) >= self.unk_token_length else tokens # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer._convert_token_to_id def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.sp_model.piece_to_id(token) # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer._convert_id_to_token def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" token = self.sp_model.IdToPiece(index) return token def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" # since we manually add the prefix space, we have to remove it when decoding if tokens[0].startswith(SPIECE_UNDERLINE): tokens[0] = tokens[0][1:] current_sub_tokens = [] out_string = "" for _, token in enumerate(tokens): # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(current_sub_tokens) + token current_sub_tokens = [] else: current_sub_tokens.append(token) out_string += self.sp_model.decode(current_sub_tokens) return out_string # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.save_vocabulary def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]: """ Save the vocabulary and special tokens file to a directory. Args: save_directory (`str`): The directory in which to save the vocabulary. Returns: `Tuple(str)`: Paths to the files saved. 
""" if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, "wb") as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (out_vocab_file,) # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.build_inputs_with_special_tokens def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): bos_token_id = [self.bos_token_id] if self.add_bos_token else [] eos_token_id = [self.eos_token_id] if self.add_eos_token else [] output = bos_token_id + token_ids_0 + eos_token_id if token_ids_1 is not None: output = output + bos_token_id + token_ids_1 + eos_token_id return output # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.get_special_tokens_mask def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) bos_token_id = [1] if self.add_bos_token else [] eos_token_id = [1] if self.add_eos_token else [] if token_ids_1 is None: return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id return ( bos_token_id + ([0] * len(token_ids_0)) + eos_token_id + bos_token_id + ([0] * len(token_ids_1)) + eos_token_id ) # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.create_token_type_ids_from_sequences def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` if token_ids_1 is None, only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of ids. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). 
""" bos_token_id = [self.bos_token_id] if self.add_bos_token else [] eos_token_id = [self.eos_token_id] if self.add_eos_token else [] output = [0] * len(bos_token_id + token_ids_0 + eos_token_id) if token_ids_1 is not None: output += [1] * len(bos_token_id + token_ids_1 + eos_token_id) return output @property # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.default_chat_template def default_chat_template(self): """ LLaMA uses [INST] and [/INST] to indicate user messages, and <<SYS>> and <</SYS>> to indicate system messages. Assistant messages do not have special tokens, because LLaMA chat models are generally trained with strict user/assistant/user/assistant message ordering, and so assistant messages can be identified from the ordering rather than needing special tokens. The system message is partly 'embedded' in the first user message, which results in an unusual token ordering when it is present. This template should definitely be changed if you wish to fine-tune a model with more flexible role ordering! The output should look something like: <bos>[INST] B_SYS SystemPrompt E_SYS Prompt [/INST] Answer <eos><bos>[INST] Prompt [/INST] Answer <eos> <bos>[INST] Prompt [/INST] The reference for this chat template is [this code snippet](https://github.com/facebookresearch/llama/blob/556949fdfb72da27c2f4a40b7f0e4cf0b8153a28/llama/generation.py#L320-L362) in the original repository. """ logger.warning_once( "\nNo chat template is defined for this tokenizer - using the default template " f"for the {self.__class__.__name__} class. If the default is not appropriate for " "your model, please set `tokenizer.chat_template` to an appropriate template. " "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n" ) template = ( "{% if messages[0]['role'] == 'system' %}" "{% set loop_messages = messages[1:] %}" # Extract system message if it's present "{% set system_message = messages[0]['content'] %}" "{% elif USE_DEFAULT_PROMPT == true and not '<<SYS>>' in messages[0]['content'] %}" "{% set loop_messages = messages %}" # Or use the default system message if the flag is set "{% set system_message = 'DEFAULT_SYSTEM_MESSAGE' %}" "{% else %}" "{% set loop_messages = messages %}" "{% set system_message = false %}" "{% endif %}" "{% for message in loop_messages %}" # Loop over all non-system messages "{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}" "{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}" "{% endif %}" "{% if loop.index0 == 0 and system_message != false %}" # Embed system message in first message "{% set content = '<<SYS>>\\n' + system_message + '\\n<</SYS>>\\n\\n' + message['content'] %}" "{% else %}" "{% set content = message['content'] %}" "{% endif %}" "{% if message['role'] == 'user' %}" # After all of that, handle messages/roles in a fairly normal way "{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}" "{% elif message['role'] == 'system' %}" "{{ '<<SYS>>\\n' + content.strip() + '\\n<</SYS>>\\n\\n' }}" "{% elif message['role'] == 'assistant' %}" "{{ ' ' + content.strip() + ' ' + eos_token }}" "{% endif %}" "{% endfor %}" ) template = template.replace("USE_DEFAULT_PROMPT", "true" if self.use_default_system_prompt else "false") default_message = DEFAULT_SYSTEM_PROMPT.replace("\n", "\\n").replace("'", "\\'") template = template.replace("DEFAULT_SYSTEM_MESSAGE", default_message) return template def __getstate__(self): state = self.__dict__.copy() state["sp_model"] = None 
state["sp_model_proto"] = self.sp_model.serialized_model_proto() return state def __setstate__(self, d): self.__dict__ = d self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
transformers/src/transformers/models/code_llama/tokenization_code_llama.py/0
{ "file_path": "transformers/src/transformers/models/code_llama/tokenization_code_llama.py", "repo_id": "transformers", "token_count": 10023 }
329
# coding=utf-8 # Copyright 2022 Microsoft Research Asia and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Conditional DETR model.""" import math from dataclasses import dataclass from typing import Dict, List, Optional, Tuple, Union import torch from torch import Tensor, nn from ...activations import ACT2FN from ...modeling_attn_mask_utils import _prepare_4d_attention_mask from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithCrossAttentions, Seq2SeqModelOutput from ...modeling_utils import PreTrainedModel from ...utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, is_accelerate_available, is_scipy_available, is_timm_available, is_vision_available, logging, replace_return_docstrings, requires_backends, ) from ...utils.backbone_utils import load_backbone from .configuration_conditional_detr import ConditionalDetrConfig if is_accelerate_available(): from accelerate import PartialState from accelerate.utils import reduce if is_scipy_available(): from scipy.optimize import linear_sum_assignment if is_timm_available(): from timm import create_model if is_vision_available(): from ...image_transforms import center_to_corners_format logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "ConditionalDetrConfig" _CHECKPOINT_FOR_DOC = "microsoft/conditional-detr-resnet-50" CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST = [ "microsoft/conditional-detr-resnet-50", # See all Conditional DETR models at https://huggingface.co/models?filter=conditional_detr ] @dataclass class ConditionalDetrDecoderOutput(BaseModelOutputWithCrossAttentions): """ Base class for outputs of the Conditional DETR decoder. This class adds one attribute to BaseModelOutputWithCrossAttentions, namely an optional stack of intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a layernorm. This is useful when training the model with auxiliary decoding losses. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. intermediate_hidden_states (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, num_queries, hidden_size)`, *optional*, returned when `config.auxiliary_loss=True`): Intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a layernorm. """ intermediate_hidden_states: Optional[torch.FloatTensor] = None reference_points: Optional[Tuple[torch.FloatTensor]] = None @dataclass class ConditionalDetrModelOutput(Seq2SeqModelOutput): """ Base class for outputs of the Conditional DETR encoder-decoder model. This class adds one attribute to Seq2SeqModelOutput, namely an optional stack of intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a layernorm. This is useful when training the model with auxiliary decoding losses. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the decoder of the model. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. 
encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. intermediate_hidden_states (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, sequence_length, hidden_size)`, *optional*, returned when `config.auxiliary_loss=True`): Intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a layernorm. """ intermediate_hidden_states: Optional[torch.FloatTensor] = None reference_points: Optional[Tuple[torch.FloatTensor]] = None @dataclass # Copied from transformers.models.detr.modeling_detr.DetrObjectDetectionOutput with Detr->ConditionalDetr class ConditionalDetrObjectDetectionOutput(ModelOutput): """ Output type of [`ConditionalDetrForObjectDetection`]. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided): Total loss as a linear combination of a negative log-likelihood (cross-entropy) for class prediction and a bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized scale-invariant IoU loss. loss_dict (`Dict`, *optional*): A dictionary containing the individual losses. Useful for logging. logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`): Classification logits (including no-object) for all queries. pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Normalized box coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding). You can use [`~ConditionalDetrImageProcessor.post_process_object_detection`] to retrieve the unnormalized bounding boxes. auxiliary_outputs (`list[Dict]`, *optional*): Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`) and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and `pred_boxes`) for each decoder layer. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the decoder of the model. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. 
cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None loss_dict: Optional[Dict] = None logits: torch.FloatTensor = None pred_boxes: torch.FloatTensor = None auxiliary_outputs: Optional[List[Dict]] = None last_hidden_state: Optional[torch.FloatTensor] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass # Copied from transformers.models.detr.modeling_detr.DetrSegmentationOutput with Detr->ConditionalDetr class ConditionalDetrSegmentationOutput(ModelOutput): """ Output type of [`ConditionalDetrForSegmentation`]. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided): Total loss as a linear combination of a negative log-likelihood (cross-entropy) for class prediction and a bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized scale-invariant IoU loss. loss_dict (`Dict`, *optional*): A dictionary containing the individual losses. Useful for logging. logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`): Classification logits (including no-object) for all queries. pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Normalized box coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding). You can use [`~ConditionalDetrImageProcessor.post_process_object_detection`] to retrieve the unnormalized bounding boxes. pred_masks (`torch.FloatTensor` of shape `(batch_size, num_queries, height/4, width/4)`): Segmentation masks logits for all queries. 
See also [`~ConditionalDetrImageProcessor.post_process_semantic_segmentation`] or [`~ConditionalDetrImageProcessor.post_process_instance_segmentation`] [`~ConditionalDetrImageProcessor.post_process_panoptic_segmentation`] to evaluate semantic, instance and panoptic segmentation masks respectively. auxiliary_outputs (`list[Dict]`, *optional*): Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`) and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and `pred_boxes`) for each decoder layer. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the decoder of the model. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" loss: Optional[torch.FloatTensor] = None loss_dict: Optional[Dict] = None logits: torch.FloatTensor = None pred_boxes: torch.FloatTensor = None pred_masks: torch.FloatTensor = None auxiliary_outputs: Optional[List[Dict]] = None last_hidden_state: Optional[torch.FloatTensor] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None # Copied from transformers.models.detr.modeling_detr.DetrFrozenBatchNorm2d with Detr->ConditionalDetr class ConditionalDetrFrozenBatchNorm2d(nn.Module): """ BatchNorm2d where the batch statistics and the affine parameters are fixed. Copy-paste from torchvision.misc.ops with added eps before rqsrt, without which any other models than torchvision.models.resnet[18,34,50,101] produce nans. """ def __init__(self, n): super().__init__() self.register_buffer("weight", torch.ones(n)) self.register_buffer("bias", torch.zeros(n)) self.register_buffer("running_mean", torch.zeros(n)) self.register_buffer("running_var", torch.ones(n)) def _load_from_state_dict( self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ): num_batches_tracked_key = prefix + "num_batches_tracked" if num_batches_tracked_key in state_dict: del state_dict[num_batches_tracked_key] super()._load_from_state_dict( state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ) def forward(self, x): # move reshapes to the beginning # to make it user-friendly weight = self.weight.reshape(1, -1, 1, 1) bias = self.bias.reshape(1, -1, 1, 1) running_var = self.running_var.reshape(1, -1, 1, 1) running_mean = self.running_mean.reshape(1, -1, 1, 1) epsilon = 1e-5 scale = weight * (running_var + epsilon).rsqrt() bias = bias - running_mean * scale return x * scale + bias # Copied from transformers.models.detr.modeling_detr.replace_batch_norm with Detr->ConditionalDetr def replace_batch_norm(model): r""" Recursively replace all `torch.nn.BatchNorm2d` with `ConditionalDetrFrozenBatchNorm2d`. Args: model (torch.nn.Module): input model """ for name, module in model.named_children(): if isinstance(module, nn.BatchNorm2d): new_module = ConditionalDetrFrozenBatchNorm2d(module.num_features) if not module.weight.device == torch.device("meta"): new_module.weight.data.copy_(module.weight) new_module.bias.data.copy_(module.bias) new_module.running_mean.data.copy_(module.running_mean) new_module.running_var.data.copy_(module.running_var) model._modules[name] = new_module if len(list(module.children())) > 0: replace_batch_norm(module) # Copied from transformers.models.detr.modeling_detr.DetrConvEncoder class ConditionalDetrConvEncoder(nn.Module): """ Convolutional backbone, using either the AutoBackbone API or one from the timm library. nn.BatchNorm2d layers are replaced by DetrFrozenBatchNorm2d as defined above. 
""" def __init__(self, config): super().__init__() self.config = config if config.use_timm_backbone: requires_backends(self, ["timm"]) kwargs = {} if config.dilation: kwargs["output_stride"] = 16 backbone = create_model( config.backbone, pretrained=config.use_pretrained_backbone, features_only=True, out_indices=(1, 2, 3, 4), in_chans=config.num_channels, **kwargs, ) else: backbone = load_backbone(config) # replace batch norm by frozen batch norm with torch.no_grad(): replace_batch_norm(backbone) self.model = backbone self.intermediate_channel_sizes = ( self.model.feature_info.channels() if config.use_timm_backbone else self.model.channels ) backbone_model_type = config.backbone if config.use_timm_backbone else config.backbone_config.model_type if "resnet" in backbone_model_type: for name, parameter in self.model.named_parameters(): if config.use_timm_backbone: if "layer2" not in name and "layer3" not in name and "layer4" not in name: parameter.requires_grad_(False) else: if "stage.1" not in name and "stage.2" not in name and "stage.3" not in name: parameter.requires_grad_(False) def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor): # send pixel_values through the model to get list of feature maps features = self.model(pixel_values) if self.config.use_timm_backbone else self.model(pixel_values).feature_maps out = [] for feature_map in features: # downsample pixel_mask to match shape of corresponding feature_map mask = nn.functional.interpolate(pixel_mask[None].float(), size=feature_map.shape[-2:]).to(torch.bool)[0] out.append((feature_map, mask)) return out # Copied from transformers.models.detr.modeling_detr.DetrConvModel with Detr->ConditionalDetr class ConditionalDetrConvModel(nn.Module): """ This module adds 2D position embeddings to all intermediate feature maps of the convolutional encoder. """ def __init__(self, conv_encoder, position_embedding): super().__init__() self.conv_encoder = conv_encoder self.position_embedding = position_embedding def forward(self, pixel_values, pixel_mask): # send pixel_values and pixel_mask through backbone to get list of (feature_map, pixel_mask) tuples out = self.conv_encoder(pixel_values, pixel_mask) pos = [] for feature_map, mask in out: # position encoding pos.append(self.position_embedding(feature_map, mask).to(feature_map.dtype)) return out, pos class ConditionalDetrSinePositionEmbedding(nn.Module): """ This is a more standard version of the position embedding, very similar to the one used by the Attention is all you need paper, generalized to work on images. 
""" def __init__(self, embedding_dim=64, temperature=10000, normalize=False, scale=None): super().__init__() self.embedding_dim = embedding_dim self.temperature = temperature self.normalize = normalize if scale is not None and normalize is False: raise ValueError("normalize should be True if scale is passed") if scale is None: scale = 2 * math.pi self.scale = scale def forward(self, pixel_values, pixel_mask): if pixel_mask is None: raise ValueError("No pixel mask provided") y_embed = pixel_mask.cumsum(1, dtype=torch.float32) x_embed = pixel_mask.cumsum(2, dtype=torch.float32) if self.normalize: y_embed = y_embed / (y_embed[:, -1:, :] + 1e-6) * self.scale x_embed = x_embed / (x_embed[:, :, -1:] + 1e-6) * self.scale dim_t = torch.arange(self.embedding_dim, dtype=torch.int64, device=pixel_values.device).float() dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / self.embedding_dim) pos_x = x_embed[:, :, :, None] / dim_t pos_y = y_embed[:, :, :, None] / dim_t pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) return pos # Copied from transformers.models.detr.modeling_detr.DetrLearnedPositionEmbedding with Detr->ConditionalDetr class ConditionalDetrLearnedPositionEmbedding(nn.Module): """ This module learns positional embeddings up to a fixed maximum size. """ def __init__(self, embedding_dim=256): super().__init__() self.row_embeddings = nn.Embedding(50, embedding_dim) self.column_embeddings = nn.Embedding(50, embedding_dim) def forward(self, pixel_values, pixel_mask=None): height, width = pixel_values.shape[-2:] width_values = torch.arange(width, device=pixel_values.device) height_values = torch.arange(height, device=pixel_values.device) x_emb = self.column_embeddings(width_values) y_emb = self.row_embeddings(height_values) pos = torch.cat([x_emb.unsqueeze(0).repeat(height, 1, 1), y_emb.unsqueeze(1).repeat(1, width, 1)], dim=-1) pos = pos.permute(2, 0, 1) pos = pos.unsqueeze(0) pos = pos.repeat(pixel_values.shape[0], 1, 1, 1) return pos # Copied from transformers.models.detr.modeling_detr.build_position_encoding with Detr->ConditionalDetr def build_position_encoding(config): n_steps = config.d_model // 2 if config.position_embedding_type == "sine": # TODO find a better way of exposing other arguments position_embedding = ConditionalDetrSinePositionEmbedding(n_steps, normalize=True) elif config.position_embedding_type == "learned": position_embedding = ConditionalDetrLearnedPositionEmbedding(n_steps) else: raise ValueError(f"Not supported {config.position_embedding_type}") return position_embedding # function to generate sine positional embedding for 2d coordinates def gen_sine_position_embeddings(pos_tensor, d_model): scale = 2 * math.pi dim = d_model // 2 dim_t = torch.arange(dim, dtype=torch.float32, device=pos_tensor.device) dim_t = 10000 ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / dim) x_embed = pos_tensor[:, :, 0] * scale y_embed = pos_tensor[:, :, 1] * scale pos_x = x_embed[:, :, None] / dim_t pos_y = y_embed[:, :, None] / dim_t pos_x = torch.stack((pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), dim=3).flatten(2) pos_y = torch.stack((pos_y[:, :, 0::2].sin(), pos_y[:, :, 1::2].cos()), dim=3).flatten(2) pos = torch.cat((pos_y, pos_x), dim=2) return pos def inverse_sigmoid(x, eps=1e-5): x = x.clamp(min=0, max=1) x1 = x.clamp(min=eps) x2 = (1 - 
x).clamp(min=eps) return torch.log(x1 / x2) # Copied from transformers.models.detr.modeling_detr.DetrAttention class DetrAttention(nn.Module): """ Multi-headed attention from 'Attention Is All You Need' paper. Here, we add position embeddings to the queries and keys (as explained in the DETR paper). """ def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, bias: bool = True, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads if self.head_dim * num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {num_heads})." ) self.scaling = self.head_dim**-0.5 self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int): return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def with_pos_embed(self, tensor: torch.Tensor, object_queries: Optional[Tensor], **kwargs): position_embeddings = kwargs.pop("position_embeddings", None) if kwargs: raise ValueError(f"Unexpected arguments {kwargs.keys()}") if position_embeddings is not None and object_queries is not None: raise ValueError( "Cannot specify both position_embeddings and object_queries. Please use just object_queries" ) if position_embeddings is not None: logger.warning_once( "position_embeddings has been deprecated and will be removed in v4.34. Please use object_queries instead" ) object_queries = position_embeddings return tensor if object_queries is None else tensor + object_queries def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, object_queries: Optional[torch.Tensor] = None, key_value_states: Optional[torch.Tensor] = None, spatial_position_embeddings: Optional[torch.Tensor] = None, output_attentions: bool = False, **kwargs, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" position_embeddings = kwargs.pop("position_embeddings", None) key_value_position_embeddings = kwargs.pop("key_value_position_embeddings", None) if kwargs: raise ValueError(f"Unexpected arguments {kwargs.keys()}") if position_embeddings is not None and object_queries is not None: raise ValueError( "Cannot specify both position_embeddings and object_queries. Please use just object_queries" ) if key_value_position_embeddings is not None and spatial_position_embeddings is not None: raise ValueError( "Cannot specify both key_value_position_embeddings and spatial_position_embeddings. Please use just spatial_position_embeddings" ) if position_embeddings is not None: logger.warning_once( "position_embeddings has been deprecated and will be removed in v4.34. Please use object_queries instead" ) object_queries = position_embeddings if key_value_position_embeddings is not None: logger.warning_once( "key_value_position_embeddings has been deprecated and will be removed in v4.34. 
Please use spatial_position_embeddings instead" ) spatial_position_embeddings = key_value_position_embeddings # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None batch_size, target_len, embed_dim = hidden_states.size() # add position embeddings to the hidden states before projecting to queries and keys if object_queries is not None: hidden_states_original = hidden_states hidden_states = self.with_pos_embed(hidden_states, object_queries) # add key-value position embeddings to the key value states if spatial_position_embeddings is not None: key_value_states_original = key_value_states key_value_states = self.with_pos_embed(key_value_states, spatial_position_embeddings) # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj if is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, batch_size) value_states = self._shape(self.v_proj(key_value_states_original), -1, batch_size) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, batch_size) value_states = self._shape(self.v_proj(hidden_states_original), -1, batch_size) proj_shape = (batch_size * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, target_len, batch_size).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) source_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len): raise ValueError( f"Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (batch_size, 1, target_len, source_len): raise ValueError( f"Attention mask should be of size {(batch_size, 1, target_len, source_len)}, but is" f" {attention_mask.size()}" ) attn_weights = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attention_mask attn_weights = attn_weights.view(batch_size * self.num_heads, target_len, source_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len) attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (batch_size * self.num_heads, target_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(batch_size, self.num_heads, target_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(batch_size, target_len, embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped class ConditionalDetrAttention(nn.Module): """ Cross-Attention used in Conditional DETR 'Conditional DETR for Fast Training Convergence' paper. 
The key q_proj, k_proj, v_proj are defined outside the attention. This attention allows the dim of q, k to be different to v. """ def __init__( self, embed_dim: int, out_dim: int, num_heads: int, dropout: float = 0.0, bias: bool = True, ): super().__init__() self.embed_dim = embed_dim self.out_dim = out_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads if self.head_dim * num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {num_heads})." ) # head dimension of values self.v_head_dim = out_dim // num_heads if self.v_head_dim * num_heads != self.out_dim: raise ValueError( f"out_dim must be divisible by num_heads (got `out_dim`: {self.out_dim} and `num_heads`: {num_heads})." ) self.scaling = self.head_dim**-0.5 self.out_proj = nn.Linear(out_dim, out_dim, bias=bias) def _qk_shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int): return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def _v_shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int): return tensor.view(batch_size, seq_len, self.num_heads, self.v_head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, key_states: Optional[torch.Tensor] = None, value_states: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" batch_size, target_len, _ = hidden_states.size() # get query proj query_states = hidden_states * self.scaling # get key, value proj key_states = self._qk_shape(key_states, -1, batch_size) value_states = self._v_shape(value_states, -1, batch_size) proj_shape = (batch_size * self.num_heads, -1, self.head_dim) v_proj_shape = (batch_size * self.num_heads, -1, self.v_head_dim) query_states = self._qk_shape(query_states, target_len, batch_size).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*v_proj_shape) source_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len): raise ValueError( f"Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (batch_size, 1, target_len, source_len): raise ValueError( f"Attention mask should be of size {(batch_size, 1, target_len, source_len)}, but is" f" {attention_mask.size()}" ) attn_weights = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attention_mask attn_weights = attn_weights.view(batch_size * self.num_heads, target_len, source_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. 
# In order to do so, attn_weights have to reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len) attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (batch_size * self.num_heads, target_len, self.v_head_dim): raise ValueError( f"`attn_output` should be of size {(batch_size, self.num_heads, target_len, self.v_head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.v_head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(batch_size, target_len, self.out_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped # Copied from transformers.models.detr.modeling_detr.DetrEncoderLayer with DetrEncoderLayer->ConditionalDetrEncoderLayer,DetrConfig->ConditionalDetrConfig class ConditionalDetrEncoderLayer(nn.Module): def __init__(self, config: ConditionalDetrConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = DetrAttention( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, object_queries: torch.Tensor = None, output_attentions: bool = False, **kwargs, ): """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative values. object_queries (`torch.FloatTensor`, *optional*): Object queries (also called content embeddings), to be added to the hidden states. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ position_embeddings = kwargs.pop("position_embeddings", None) if kwargs: raise ValueError(f"Unexpected arguments {kwargs.keys()}") if position_embeddings is not None and object_queries is not None: raise ValueError( "Cannot specify both position_embeddings and object_queries. Please use just object_queries" ) if position_embeddings is not None: logger.warning_once( "position_embeddings has been deprecated and will be removed in v4.34. 
Please use object_queries instead" ) object_queries = position_embeddings residual = hidden_states hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, object_queries=object_queries, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) if self.training: if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class ConditionalDetrDecoderLayer(nn.Module): def __init__(self, config: ConditionalDetrConfig): super().__init__() self.embed_dim = config.d_model d_model = config.d_model # Decoder Self-Attention projections self.sa_qcontent_proj = nn.Linear(d_model, d_model) self.sa_qpos_proj = nn.Linear(d_model, d_model) self.sa_kcontent_proj = nn.Linear(d_model, d_model) self.sa_kpos_proj = nn.Linear(d_model, d_model) self.sa_v_proj = nn.Linear(d_model, d_model) self.self_attn = ConditionalDetrAttention( embed_dim=self.embed_dim, out_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) # Decoder Cross-Attention projections self.ca_qcontent_proj = nn.Linear(d_model, d_model) self.ca_qpos_proj = nn.Linear(d_model, d_model) self.ca_kcontent_proj = nn.Linear(d_model, d_model) self.ca_kpos_proj = nn.Linear(d_model, d_model) self.ca_v_proj = nn.Linear(d_model, d_model) self.ca_qpos_sine_proj = nn.Linear(d_model, d_model) self.encoder_attn = ConditionalDetrAttention( self.embed_dim * 2, self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) self.nhead = config.decoder_attention_heads def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, object_queries: Optional[torch.Tensor] = None, query_position_embeddings: Optional[torch.Tensor] = None, query_sine_embed: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, is_first: Optional[bool] = False, **kwargs, ): """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative values. 
object_queries (`torch.FloatTensor`, *optional*): object_queries that are added to the queries and keys in the cross-attention layer. query_position_embeddings (`torch.FloatTensor`, *optional*): object_queries that are added to the queries and keys in the self-attention layer. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative values. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ position_embeddings = kwargs.pop("position_embeddings", None) if kwargs: raise ValueError(f"Unexpected arguments {kwargs.keys()}") if position_embeddings is not None and object_queries is not None: raise ValueError( "Cannot specify both position_embeddings and object_queries. Please use just object_queries" ) if position_embeddings is not None: logger.warning_once( "position_embeddings has been deprecated and will be removed in v4.34. Please use object_queries instead" ) object_queries = position_embeddings residual = hidden_states # ========== Begin of Self-Attention ============= # Apply projections here # shape: num_queries x batch_size x 256 q_content = self.sa_qcontent_proj( hidden_states ) # target is the input of the first decoder layer. zero by default. q_pos = self.sa_qpos_proj(query_position_embeddings) k_content = self.sa_kcontent_proj(hidden_states) k_pos = self.sa_kpos_proj(query_position_embeddings) v = self.sa_v_proj(hidden_states) _, num_queries, n_model = q_content.shape q = q_content + q_pos k = k_content + k_pos hidden_states, self_attn_weights = self.self_attn( hidden_states=q, attention_mask=attention_mask, key_states=k, value_states=v, output_attentions=output_attentions, ) # ============ End of Self-Attention ============= hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # ========== Begin of Cross-Attention ============= # Apply projections here # shape: num_queries x batch_size x 256 q_content = self.ca_qcontent_proj(hidden_states) k_content = self.ca_kcontent_proj(encoder_hidden_states) v = self.ca_v_proj(encoder_hidden_states) batch_size, num_queries, n_model = q_content.shape _, source_len, _ = k_content.shape k_pos = self.ca_kpos_proj(object_queries) # For the first decoder layer, we concatenate the positional embedding predicted from # the object query (the positional embedding) into the original query (key) in DETR. 
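# Only the first decoder layer adds the learned query position embedding into q and k here; in every layer, the sine embedding of the reference point (query_sine_embed) is concatenated per head below, doubling the cross-attention dimension (hence `embed_dim * 2` for `encoder_attn`).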
if is_first: q_pos = self.ca_qpos_proj(query_position_embeddings) q = q_content + q_pos k = k_content + k_pos else: q = q_content k = k_content q = q.view(batch_size, num_queries, self.nhead, n_model // self.nhead) query_sine_embed = self.ca_qpos_sine_proj(query_sine_embed) query_sine_embed = query_sine_embed.view(batch_size, num_queries, self.nhead, n_model // self.nhead) q = torch.cat([q, query_sine_embed], dim=3).view(batch_size, num_queries, n_model * 2) k = k.view(batch_size, source_len, self.nhead, n_model // self.nhead) k_pos = k_pos.view(batch_size, source_len, self.nhead, n_model // self.nhead) k = torch.cat([k, k_pos], dim=3).view(batch_size, source_len, n_model * 2) # Cross-Attention Block cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states, cross_attn_weights = self.encoder_attn( hidden_states=q, attention_mask=encoder_attention_mask, key_states=k, value_states=v, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # ============ End of Cross-Attention ============= # Fully Connected residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) return outputs # Copied from transformers.models.detr.modeling_detr.DetrClassificationHead with Detr->ConditionalDetr class ConditionalDetrClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, input_dim: int, inner_dim: int, num_classes: int, pooler_dropout: float): super().__init__() self.dense = nn.Linear(input_dim, inner_dim) self.dropout = nn.Dropout(p=pooler_dropout) self.out_proj = nn.Linear(inner_dim, num_classes) def forward(self, hidden_states: torch.Tensor): hidden_states = self.dropout(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = torch.tanh(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.out_proj(hidden_states) return hidden_states # Copied from transformers.models.detr.modeling_detr.DetrMLPPredictionHead with DetrMLPPredictionHead->MLP class MLP(nn.Module): """ Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates, height and width of a bounding box w.r.t. an image. 
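Each hidden layer is followed by a ReLU activation; the output layer is linear.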
Copied from https://github.com/facebookresearch/detr/blob/master/models/detr.py """ def __init__(self, input_dim, hidden_dim, output_dim, num_layers): super().__init__() self.num_layers = num_layers h = [hidden_dim] * (num_layers - 1) self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) def forward(self, x): for i, layer in enumerate(self.layers): x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x) return x # Copied from transformers.models.detr.modeling_detr.DetrPreTrainedModel with Detr->ConditionalDetr class ConditionalDetrPreTrainedModel(PreTrainedModel): config_class = ConditionalDetrConfig base_model_prefix = "model" main_input_name = "pixel_values" _no_split_modules = [r"ConditionalDetrConvEncoder", r"ConditionalDetrEncoderLayer", r"ConditionalDetrDecoderLayer"] def _init_weights(self, module): std = self.config.init_std xavier_std = self.config.init_xavier_std if isinstance(module, ConditionalDetrMHAttentionMap): nn.init.zeros_(module.k_linear.bias) nn.init.zeros_(module.q_linear.bias) nn.init.xavier_uniform_(module.k_linear.weight, gain=xavier_std) nn.init.xavier_uniform_(module.q_linear.weight, gain=xavier_std) elif isinstance(module, ConditionalDetrLearnedPositionEmbedding): nn.init.uniform_(module.row_embeddings.weight) nn.init.uniform_(module.column_embeddings.weight) if isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() CONDITIONAL_DETR_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`ConditionalDetrConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ CONDITIONAL_DETR_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConditionalDetrImageProcessor.__call__`] for details. pixel_mask (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). [What are attention masks?](../glossary#attention-mask) decoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, num_queries)`, *optional*): Not used by default. Can be used to mask object queries. 
encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you can choose to directly pass a flattened representation of an image. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an embedded representation. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ # Copied from transformers.models.detr.modeling_detr.DetrEncoder with Detr->ConditionalDetr,DETR->ConditionalDETR class ConditionalDetrEncoder(ConditionalDetrPreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`ConditionalDetrEncoderLayer`]. The encoder updates the flattened feature map through multiple self-attention layers. Small tweak for ConditionalDETR: - object_queries are added to the forward pass. Args: config: ConditionalDetrConfig """ def __init__(self, config: ConditionalDetrConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop self.layers = nn.ModuleList([ConditionalDetrEncoderLayer(config) for _ in range(config.encoder_layers)]) # in the original ConditionalDETR, no layernorm is used at the end of the encoder, as "normalize_before" is set to False by default # Initialize weights and apply final processing self.post_init() def forward( self, inputs_embeds=None, attention_mask=None, object_queries=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, ): r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Flattened feature map (output of the backbone + projection layer) that is passed to the encoder. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`: - 1 for pixel features that are real (i.e. **not masked**), - 0 for pixel features that are padding (i.e. **masked**). [What are attention masks?](../glossary#attention-mask) object_queries (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Object queries that are added to the queries in each self-attention layer. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. 
return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ position_embeddings = kwargs.pop("position_embeddings", None) if kwargs: raise ValueError(f"Unexpected arguments {kwargs.keys()}") if position_embeddings is not None and object_queries is not None: raise ValueError( "Cannot specify both position_embeddings and object_queries. Please use just object_queries" ) if position_embeddings is not None: logger.warning_once( "position_embeddings has been deprecated and will be removed in v4.34. Please use object_queries instead" ) object_queries = position_embeddings output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict hidden_states = inputs_embeds hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) # expand attention_mask if attention_mask is not None: # [batch_size, seq_len] -> [batch_size, 1, target_seq_len, source_seq_len] attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) to_drop = False if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: # skip the layer to_drop = True if to_drop: layer_outputs = (None, None) else: # we add object_queries as extra input to the encoder_layer layer_outputs = encoder_layer( hidden_states, attention_mask, object_queries=object_queries, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) class ConditionalDetrDecoder(ConditionalDetrPreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`ConditionalDetrDecoderLayer`]. The decoder updates the query embeddings through multiple self-attention and cross-attention layers. Some small tweaks for Conditional DETR: - object_queries and query_position_embeddings are added to the forward pass. - if self.config.auxiliary_loss is set to True, also returns a stack of activations from all decoding layers. 
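- 2D reference points are predicted from the query position embeddings via `ref_point_head`, and each layer's cross-attention is conditioned on a sine embedding of these points, modulated by the output of `query_scale` (except in the first layer).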
Args: config: ConditionalDetrConfig """ def __init__(self, config: ConditionalDetrConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.layers = nn.ModuleList([ConditionalDetrDecoderLayer(config) for _ in range(config.decoder_layers)]) # in Conditional DETR, the decoder uses layernorm after the last decoder layer output self.layernorm = nn.LayerNorm(config.d_model) d_model = config.d_model self.gradient_checkpointing = False # query_scale is the FFN applied on f to generate transformation T self.query_scale = MLP(d_model, d_model, d_model, 2) self.ref_point_head = MLP(d_model, d_model, 2, 2) for layer_id in range(config.decoder_layers - 1): self.layers[layer_id + 1].ca_qpos_proj = None # Initialize weights and apply final processing self.post_init() def forward( self, inputs_embeds=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, object_queries=None, query_position_embeddings=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, ): r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): The query embeddings that are passed into the decoder. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on certain queries. Mask values selected in `[0, 1]`: - 1 for queries that are **not masked**, - 0 for queries that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding pixel_values of the encoder. Mask values selected in `[0, 1]`: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). object_queries (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Position embeddings that are added to the queries and keys in each cross-attention layer. query_position_embeddings (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`): , *optional*): Position embeddings that are added to the queries and keys in each self-attention layer. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ position_embeddings = kwargs.pop("position_embeddings", None) if kwargs: raise ValueError(f"Unexpected arguments {kwargs.keys()}") if position_embeddings is not None and object_queries is not None: raise ValueError( "Cannot specify both position_embeddings and object_queries. Please use just object_queries" ) if position_embeddings is not None: logger.warning_once( "position_embeddings has been deprecated and will be removed in v4.34. 
Please use object_queries instead" ) object_queries = position_embeddings output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if inputs_embeds is not None: hidden_states = inputs_embeds input_shape = inputs_embeds.size()[:-1] # expand encoder attention mask if encoder_hidden_states is not None and encoder_attention_mask is not None: # [batch_size, seq_len] -> [batch_size, 1, target_seq_len, source_seq_len] encoder_attention_mask = _prepare_4d_attention_mask( encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] ) # optional intermediate hidden states intermediate = () if self.config.auxiliary_loss else None # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None reference_points_before_sigmoid = self.ref_point_head( query_position_embeddings ) # [num_queries, batch_size, 2] reference_points = reference_points_before_sigmoid.sigmoid().transpose(0, 1) obj_center = reference_points[..., :2].transpose(0, 1) # get sine embedding for the query vector query_sine_embed_before_transformation = gen_sine_position_embeddings(obj_center, self.config.d_model) for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue if idx == 0: pos_transformation = 1 else: pos_transformation = self.query_scale(hidden_states) # apply transformation query_sine_embed = query_sine_embed_before_transformation * pos_transformation if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( decoder_layer.__call__, hidden_states, None, object_queries, query_position_embeddings, query_sine_embed, encoder_hidden_states, encoder_attention_mask, None, None, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=None, object_queries=object_queries, query_position_embeddings=query_position_embeddings, query_sine_embed=query_sine_embed, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, is_first=(idx == 0), ) hidden_states = layer_outputs[0] if self.config.auxiliary_loss: hidden_states = self.layernorm(hidden_states) intermediate += (hidden_states,) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) # finally, apply layernorm hidden_states = self.layernorm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) # stack intermediate decoder activations if self.config.auxiliary_loss: intermediate = torch.stack(intermediate) if not return_dict: return tuple( v for v in [ hidden_states, all_hidden_states, all_self_attns, all_cross_attentions, intermediate, reference_points, ] if v is not None ) return ConditionalDetrDecoderOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, 
intermediate_hidden_states=intermediate, reference_points=reference_points, ) @add_start_docstrings( """ The bare Conditional DETR Model (consisting of a backbone and encoder-decoder Transformer) outputting raw hidden-states without any specific head on top. """, CONDITIONAL_DETR_START_DOCSTRING, ) class ConditionalDetrModel(ConditionalDetrPreTrainedModel): def __init__(self, config: ConditionalDetrConfig): super().__init__(config) # Create backbone + positional encoding backbone = ConditionalDetrConvEncoder(config) object_queries = build_position_encoding(config) self.backbone = ConditionalDetrConvModel(backbone, object_queries) # Create projection layer self.input_projection = nn.Conv2d(backbone.intermediate_channel_sizes[-1], config.d_model, kernel_size=1) self.query_position_embeddings = nn.Embedding(config.num_queries, config.d_model) self.encoder = ConditionalDetrEncoder(config) self.decoder = ConditionalDetrDecoder(config) # Initialize weights and apply final processing self.post_init() def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def freeze_backbone(self): for name, param in self.backbone.conv_encoder.model.named_parameters(): param.requires_grad_(False) def unfreeze_backbone(self): for name, param in self.backbone.conv_encoder.model.named_parameters(): param.requires_grad_(True) @add_start_docstrings_to_model_forward(CONDITIONAL_DETR_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=ConditionalDetrModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, encoder_outputs: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], ConditionalDetrModelOutput]: r""" Returns: Examples: ```python >>> from transformers import AutoImageProcessor, AutoModel >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50") >>> model = AutoModel.from_pretrained("microsoft/conditional-detr-resnet-50") >>> # prepare image for the model >>> inputs = image_processor(images=image, return_tensors="pt") >>> # forward pass >>> outputs = model(**inputs) >>> # the last hidden states are the final query embeddings of the Transformer decoder >>> # these are of shape (batch_size, num_queries, hidden_size) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) [1, 300, 256] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict batch_size, num_channels, height, width = pixel_values.shape device = pixel_values.device if pixel_mask is None: pixel_mask = torch.ones(((batch_size, height, width)), device=device) # First, sent pixel_values + pixel_mask through Backbone to obtain the features # pixel_values should be of shape (batch_size, num_channels, height, width) # pixel_mask should be of 
shape (batch_size, height, width) features, object_queries_list = self.backbone(pixel_values, pixel_mask) # get final feature map and downsampled mask feature_map, mask = features[-1] if mask is None: raise ValueError("Backbone does not return downsampled pixel mask") # Second, apply 1x1 convolution to reduce the channel dimension to d_model (256 by default) projected_feature_map = self.input_projection(feature_map) # Third, flatten the feature map + object_queries of shape NxCxHxW to NxCxHW, and permute it to NxHWxC # In other words, turn their shape into (batch_size, sequence_length, hidden_size) flattened_features = projected_feature_map.flatten(2).permute(0, 2, 1) object_queries = object_queries_list[-1].flatten(2).permute(0, 2, 1) flattened_mask = mask.flatten(1) # Fourth, sent flattened_features + flattened_mask + object_queries through encoder # flattened_features is a Tensor of shape (batch_size, heigth*width, hidden_size) # flattened_mask is a Tensor of shape (batch_size, heigth*width) if encoder_outputs is None: encoder_outputs = self.encoder( inputs_embeds=flattened_features, attention_mask=flattened_mask, object_queries=object_queries, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # Fifth, sent query embeddings + object_queries through the decoder (which is conditioned on the encoder output) query_position_embeddings = self.query_position_embeddings.weight.unsqueeze(0).repeat(batch_size, 1, 1) queries = torch.zeros_like(query_position_embeddings) # decoder outputs consists of (dec_features, dec_hidden, dec_attn) decoder_outputs = self.decoder( inputs_embeds=queries, attention_mask=None, object_queries=object_queries, query_position_embeddings=query_position_embeddings, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=flattened_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs return ConditionalDetrModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, intermediate_hidden_states=decoder_outputs.intermediate_hidden_states, reference_points=decoder_outputs.reference_points, ) @add_start_docstrings( """ CONDITIONAL_DETR Model (consisting of a backbone and encoder-decoder Transformer) with object detection heads on top, for tasks such as COCO detection. 
""", CONDITIONAL_DETR_START_DOCSTRING, ) class ConditionalDetrForObjectDetection(ConditionalDetrPreTrainedModel): def __init__(self, config: ConditionalDetrConfig): super().__init__(config) # CONDITIONAL DETR encoder-decoder model self.model = ConditionalDetrModel(config) # Object detection heads self.class_labels_classifier = nn.Linear( config.d_model, config.num_labels ) # We add one for the "no object" class self.bbox_predictor = ConditionalDetrMLPPredictionHead( input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3 ) # Initialize weights and apply final processing self.post_init() # taken from https://github.com/Atten4Vis/conditionalDETR/blob/master/models/conditional_detr.py @torch.jit.unused def _set_aux_loss(self, outputs_class, outputs_coord): # this is a workaround to make torchscript happy, as torchscript # doesn't support dictionary with non-homogeneous values, such # as a dict having both a Tensor and a list. return [{"logits": a, "pred_boxes": b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])] @add_start_docstrings_to_model_forward(CONDITIONAL_DETR_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=ConditionalDetrObjectDetectionOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, encoder_outputs: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[List[dict]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], ConditionalDetrObjectDetectionOutput]: r""" labels (`List[Dict]` of len `(batch_size,)`, *optional*): Labels for computing the bipartite matching loss. List of dicts, each dictionary containing at least the following 2 keys: 'class_labels' and 'boxes' (the class labels and bounding boxes of an image in the batch respectively). The class labels themselves should be a `torch.LongTensor` of len `(number of bounding boxes in the image,)` and the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)`. Returns: Examples: ```python >>> from transformers import AutoImageProcessor, AutoModelForObjectDetection >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50") >>> model = AutoModelForObjectDetection.from_pretrained("microsoft/conditional-detr-resnet-50") >>> inputs = image_processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> # convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax) >>> target_sizes = torch.tensor([image.size[::-1]]) >>> results = image_processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)[ ... 0 ... ] >>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]): ... box = [round(i, 2) for i in box.tolist()] ... print( ... f"Detected {model.config.id2label[label.item()]} with confidence " ... f"{round(score.item(), 3)} at location {box}" ... 
) Detected remote with confidence 0.833 at location [38.31, 72.1, 177.63, 118.45] Detected cat with confidence 0.831 at location [9.2, 51.38, 321.13, 469.0] Detected cat with confidence 0.804 at location [340.3, 16.85, 642.93, 370.95] Detected remote with confidence 0.683 at location [334.48, 73.49, 366.37, 190.01] Detected couch with confidence 0.535 at location [0.52, 1.19, 640.35, 475.1] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict # First, sent images through CONDITIONAL_DETR base model to obtain encoder + decoder outputs outputs = self.model( pixel_values, pixel_mask=pixel_mask, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] # class logits + predicted bounding boxes logits = self.class_labels_classifier(sequence_output) reference = outputs.reference_points if return_dict else outputs[-1] reference_before_sigmoid = inverse_sigmoid(reference).transpose(0, 1) outputs_coords = [] hs = sequence_output tmp = self.bbox_predictor(hs) tmp[..., :2] += reference_before_sigmoid pred_boxes = tmp.sigmoid() # pred_boxes = self.bbox_predictor(sequence_output).sigmoid() loss, loss_dict, auxiliary_outputs = None, None, None if labels is not None: # First: create the matcher matcher = ConditionalDetrHungarianMatcher( class_cost=self.config.class_cost, bbox_cost=self.config.bbox_cost, giou_cost=self.config.giou_cost ) # Second: create the criterion losses = ["labels", "boxes", "cardinality"] criterion = ConditionalDetrLoss( matcher=matcher, num_classes=self.config.num_labels, focal_alpha=self.config.focal_alpha, losses=losses, ) criterion.to(self.device) # Third: compute the losses, based on outputs and labels outputs_loss = {} outputs_loss["logits"] = logits outputs_loss["pred_boxes"] = pred_boxes if self.config.auxiliary_loss: intermediate = outputs.intermediate_hidden_states if return_dict else outputs[4] outputs_class = self.class_labels_classifier(intermediate) for lvl in range(intermediate.shape[0]): tmp = self.bbox_predictor(intermediate[lvl]) tmp[..., :2] += reference_before_sigmoid outputs_coord = tmp.sigmoid() outputs_coords.append(outputs_coord) outputs_coord = torch.stack(outputs_coords) auxiliary_outputs = self._set_aux_loss(outputs_class, outputs_coord) outputs_loss["auxiliary_outputs"] = auxiliary_outputs loss_dict = criterion(outputs_loss, labels) # Fourth: compute total loss, as a weighted sum of the various losses weight_dict = {"loss_ce": self.config.cls_loss_coefficient, "loss_bbox": self.config.bbox_loss_coefficient} weight_dict["loss_giou"] = self.config.giou_loss_coefficient if self.config.auxiliary_loss: aux_weight_dict = {} for i in range(self.config.decoder_layers - 1): aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()}) weight_dict.update(aux_weight_dict) loss = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict) if not return_dict: if auxiliary_outputs is not None: output = (logits, pred_boxes) + auxiliary_outputs + outputs else: output = (logits, pred_boxes) + outputs return ((loss, loss_dict) + output) if loss is not None else output return ConditionalDetrObjectDetectionOutput( loss=loss, loss_dict=loss_dict, logits=logits, pred_boxes=pred_boxes, auxiliary_outputs=auxiliary_outputs, last_hidden_state=outputs.last_hidden_state, 
decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) @add_start_docstrings( """ CONDITIONAL_DETR Model (consisting of a backbone and encoder-decoder Transformer) with a segmentation head on top, for tasks such as COCO panoptic. """, CONDITIONAL_DETR_START_DOCSTRING, ) class ConditionalDetrForSegmentation(ConditionalDetrPreTrainedModel): def __init__(self, config: ConditionalDetrConfig): super().__init__(config) # object detection model self.conditional_detr = ConditionalDetrForObjectDetection(config) # segmentation head hidden_size, number_of_heads = config.d_model, config.encoder_attention_heads intermediate_channel_sizes = self.conditional_detr.model.backbone.conv_encoder.intermediate_channel_sizes self.mask_head = ConditionalDetrMaskHeadSmallConv( hidden_size + number_of_heads, intermediate_channel_sizes[::-1][-3:], hidden_size ) self.bbox_attention = ConditionalDetrMHAttentionMap( hidden_size, hidden_size, number_of_heads, dropout=0.0, std=config.init_xavier_std ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(CONDITIONAL_DETR_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=ConditionalDetrSegmentationOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.FloatTensor] = None, encoder_outputs: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[List[dict]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], ConditionalDetrSegmentationOutput]: r""" labels (`List[Dict]` of len `(batch_size,)`, *optional*): Labels for computing the bipartite matching loss, DICE/F-1 loss and Focal loss. List of dicts, each dictionary containing at least the following 3 keys: 'class_labels', 'boxes' and 'masks' (the class labels, bounding boxes and segmentation masks of an image in the batch respectively). The class labels themselves should be a `torch.LongTensor` of len `(number of bounding boxes in the image,)`, the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)` and the masks a `torch.FloatTensor` of shape `(number of bounding boxes in the image, height, width)`. Returns: Examples: ```python >>> import io >>> import requests >>> from PIL import Image >>> import torch >>> import numpy >>> from transformers import ( ... AutoImageProcessor, ... ConditionalDetrConfig, ... ConditionalDetrForSegmentation, ... 
) >>> from transformers.image_transforms import rgb_to_id >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50") >>> # randomly initialize all weights of the model >>> config = ConditionalDetrConfig() >>> model = ConditionalDetrForSegmentation(config) >>> # prepare image for the model >>> inputs = image_processor(images=image, return_tensors="pt") >>> # forward pass >>> outputs = model(**inputs) >>> # Use the `post_process_panoptic_segmentation` method of the `image_processor` to retrieve post-processed panoptic segmentation maps >>> # Segmentation results are returned as a list of dictionaries >>> result = image_processor.post_process_panoptic_segmentation(outputs, target_sizes=[(300, 500)]) >>> # A tensor of shape (height, width) where each value denotes a segment id, filled with -1 if no segment is found >>> panoptic_seg = result[0]["segmentation"] >>> # Get prediction score and segment_id to class_id mapping of each segment >>> panoptic_segments_info = result[0]["segments_info"] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict batch_size, num_channels, height, width = pixel_values.shape device = pixel_values.device if pixel_mask is None: pixel_mask = torch.ones((batch_size, height, width), device=device) # First, get list of feature maps and object_queries features, object_queries_list = self.conditional_detr.model.backbone(pixel_values, pixel_mask=pixel_mask) # Second, apply 1x1 convolution to reduce the channel dimension to d_model (256 by default) feature_map, mask = features[-1] batch_size, num_channels, height, width = feature_map.shape projected_feature_map = self.conditional_detr.model.input_projection(feature_map) # Third, flatten the feature map + object_queries of shape NxCxHxW to NxCxHW, and permute it to NxHWxC # In other words, turn their shape into (batch_size, sequence_length, hidden_size) flattened_features = projected_feature_map.flatten(2).permute(0, 2, 1) object_queries = object_queries_list[-1].flatten(2).permute(0, 2, 1) flattened_mask = mask.flatten(1) # Fourth, sent flattened_features + flattened_mask + object_queries through encoder # flattened_features is a Tensor of shape (batch_size, heigth*width, hidden_size) # flattened_mask is a Tensor of shape (batch_size, heigth*width) if encoder_outputs is None: encoder_outputs = self.conditional_detr.model.encoder( inputs_embeds=flattened_features, attention_mask=flattened_mask, object_queries=object_queries, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # Fifth, sent query embeddings + object_queries through the decoder (which is conditioned on the encoder output) query_position_embeddings = self.conditional_detr.model.query_position_embeddings.weight.unsqueeze(0).repeat( batch_size, 1, 1 ) queries = torch.zeros_like(query_position_embeddings) # decoder outputs consists of (dec_features, dec_hidden, dec_attn) decoder_outputs = self.conditional_detr.model.decoder( 
inputs_embeds=queries, attention_mask=None, object_queries=object_queries, query_position_embeddings=query_position_embeddings, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=flattened_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = decoder_outputs[0] # Sixth, compute logits, pred_boxes and pred_masks logits = self.conditional_detr.class_labels_classifier(sequence_output) pred_boxes = self.conditional_detr.bbox_predictor(sequence_output).sigmoid() memory = encoder_outputs[0].permute(0, 2, 1).view(batch_size, self.config.d_model, height, width) mask = flattened_mask.view(batch_size, height, width) # FIXME h_boxes takes the last one computed, keep this in mind # important: we need to reverse the mask, since in the original implementation the mask works reversed # bbox_mask is of shape (batch_size, num_queries, number_of_attention_heads in bbox_attention, height/32, width/32) bbox_mask = self.bbox_attention(sequence_output, memory, mask=~mask) seg_masks = self.mask_head(projected_feature_map, bbox_mask, [features[2][0], features[1][0], features[0][0]]) pred_masks = seg_masks.view( batch_size, self.conditional_detr.config.num_queries, seg_masks.shape[-2], seg_masks.shape[-1] ) loss, loss_dict, auxiliary_outputs = None, None, None if labels is not None: # First: create the matcher matcher = ConditionalDetrHungarianMatcher( class_cost=self.config.class_cost, bbox_cost=self.config.bbox_cost, giou_cost=self.config.giou_cost ) # Second: create the criterion losses = ["labels", "boxes", "cardinality", "masks"] criterion = ConditionalDetrLoss( matcher=matcher, num_classes=self.config.num_labels, focal_alpha=self.config.focal_alpha, losses=losses, ) criterion.to(self.device) # Third: compute the losses, based on outputs and labels outputs_loss = {} outputs_loss["logits"] = logits outputs_loss["pred_boxes"] = pred_boxes outputs_loss["pred_masks"] = pred_masks if self.config.auxiliary_loss: intermediate = decoder_outputs.intermediate_hidden_states if return_dict else decoder_outputs[-1] outputs_class = self.conditional_detr.class_labels_classifier(intermediate) outputs_coord = self.conditional_detr.bbox_predictor(intermediate).sigmoid() auxiliary_outputs = self.conditional_detr._set_aux_loss(outputs_class, outputs_coord) outputs_loss["auxiliary_outputs"] = auxiliary_outputs loss_dict = criterion(outputs_loss, labels) # Fourth: compute total loss, as a weighted sum of the various losses weight_dict = {"loss_ce": 1, "loss_bbox": self.config.bbox_loss_coefficient} weight_dict["loss_giou"] = self.config.giou_loss_coefficient weight_dict["loss_mask"] = self.config.mask_loss_coefficient weight_dict["loss_dice"] = self.config.dice_loss_coefficient if self.config.auxiliary_loss: aux_weight_dict = {} for i in range(self.config.decoder_layers - 1): aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()}) weight_dict.update(aux_weight_dict) loss = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict) if not return_dict: if auxiliary_outputs is not None: output = (logits, pred_boxes, pred_masks) + auxiliary_outputs + decoder_outputs + encoder_outputs else: output = (logits, pred_boxes, pred_masks) + decoder_outputs + encoder_outputs return ((loss, loss_dict) + output) if loss is not None else output return ConditionalDetrSegmentationOutput( loss=loss, loss_dict=loss_dict, logits=logits, pred_boxes=pred_boxes, pred_masks=pred_masks, auxiliary_outputs=auxiliary_outputs, 
last_hidden_state=decoder_outputs.last_hidden_state, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def _expand(tensor, length: int): return tensor.unsqueeze(1).repeat(1, int(length), 1, 1, 1).flatten(0, 1) # Copied from transformers.models.detr.modeling_detr.DetrMaskHeadSmallConv with Detr->ConditionalDetr class ConditionalDetrMaskHeadSmallConv(nn.Module): """ Simple convolutional head, using group norm. Upsampling is done using a FPN approach """ def __init__(self, dim, fpn_dims, context_dim): super().__init__() if dim % 8 != 0: raise ValueError( "The hidden_size + number of attention heads must be divisible by 8 as the number of groups in" " GroupNorm is set to 8" ) inter_dims = [dim, context_dim // 2, context_dim // 4, context_dim // 8, context_dim // 16, context_dim // 64] self.lay1 = nn.Conv2d(dim, dim, 3, padding=1) self.gn1 = nn.GroupNorm(8, dim) self.lay2 = nn.Conv2d(dim, inter_dims[1], 3, padding=1) self.gn2 = nn.GroupNorm(min(8, inter_dims[1]), inter_dims[1]) self.lay3 = nn.Conv2d(inter_dims[1], inter_dims[2], 3, padding=1) self.gn3 = nn.GroupNorm(min(8, inter_dims[2]), inter_dims[2]) self.lay4 = nn.Conv2d(inter_dims[2], inter_dims[3], 3, padding=1) self.gn4 = nn.GroupNorm(min(8, inter_dims[3]), inter_dims[3]) self.lay5 = nn.Conv2d(inter_dims[3], inter_dims[4], 3, padding=1) self.gn5 = nn.GroupNorm(min(8, inter_dims[4]), inter_dims[4]) self.out_lay = nn.Conv2d(inter_dims[4], 1, 3, padding=1) self.dim = dim self.adapter1 = nn.Conv2d(fpn_dims[0], inter_dims[1], 1) self.adapter2 = nn.Conv2d(fpn_dims[1], inter_dims[2], 1) self.adapter3 = nn.Conv2d(fpn_dims[2], inter_dims[3], 1) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_uniform_(m.weight, a=1) nn.init.constant_(m.bias, 0) def forward(self, x: Tensor, bbox_mask: Tensor, fpns: List[Tensor]): # here we concatenate x, the projected feature map, of shape (batch_size, d_model, heigth/32, width/32) with # the bbox_mask = the attention maps of shape (batch_size, n_queries, n_heads, height/32, width/32). # We expand the projected feature map to match the number of heads. 
x = torch.cat([_expand(x, bbox_mask.shape[1]), bbox_mask.flatten(0, 1)], 1) x = self.lay1(x) x = self.gn1(x) x = nn.functional.relu(x) x = self.lay2(x) x = self.gn2(x) x = nn.functional.relu(x) cur_fpn = self.adapter1(fpns[0]) if cur_fpn.size(0) != x.size(0): cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) x = cur_fpn + nn.functional.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") x = self.lay3(x) x = self.gn3(x) x = nn.functional.relu(x) cur_fpn = self.adapter2(fpns[1]) if cur_fpn.size(0) != x.size(0): cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) x = cur_fpn + nn.functional.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") x = self.lay4(x) x = self.gn4(x) x = nn.functional.relu(x) cur_fpn = self.adapter3(fpns[2]) if cur_fpn.size(0) != x.size(0): cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) x = cur_fpn + nn.functional.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") x = self.lay5(x) x = self.gn5(x) x = nn.functional.relu(x) x = self.out_lay(x) return x # Copied from transformers.models.detr.modeling_detr.DetrMHAttentionMap with Detr->ConditionalDetr class ConditionalDetrMHAttentionMap(nn.Module): """This is a 2D attention module, which only returns the attention softmax (no multiplication by value)""" def __init__(self, query_dim, hidden_dim, num_heads, dropout=0.0, bias=True, std=None): super().__init__() self.num_heads = num_heads self.hidden_dim = hidden_dim self.dropout = nn.Dropout(dropout) self.q_linear = nn.Linear(query_dim, hidden_dim, bias=bias) self.k_linear = nn.Linear(query_dim, hidden_dim, bias=bias) self.normalize_fact = float(hidden_dim / self.num_heads) ** -0.5 def forward(self, q, k, mask: Optional[Tensor] = None): q = self.q_linear(q) k = nn.functional.conv2d(k, self.k_linear.weight.unsqueeze(-1).unsqueeze(-1), self.k_linear.bias) queries_per_head = q.view(q.shape[0], q.shape[1], self.num_heads, self.hidden_dim // self.num_heads) keys_per_head = k.view(k.shape[0], self.num_heads, self.hidden_dim // self.num_heads, k.shape[-2], k.shape[-1]) weights = torch.einsum("bqnc,bnchw->bqnhw", queries_per_head * self.normalize_fact, keys_per_head) if mask is not None: weights.masked_fill_(mask.unsqueeze(1).unsqueeze(1), torch.finfo(weights.dtype).min) weights = nn.functional.softmax(weights.flatten(2), dim=-1).view(weights.size()) weights = self.dropout(weights) return weights # Copied from transformers.models.detr.modeling_detr.dice_loss def dice_loss(inputs, targets, num_boxes): """ Compute the DICE loss, similar to generalized IOU for masks Args: inputs: A float tensor of arbitrary shape. The predictions for each example. targets: A float tensor with the same shape as inputs. Stores the binary classification label for each element in inputs (0 for the negative class and 1 for the positive class). """ inputs = inputs.sigmoid() inputs = inputs.flatten(1) numerator = 2 * (inputs * targets).sum(1) denominator = inputs.sum(-1) + targets.sum(-1) loss = 1 - (numerator + 1) / (denominator + 1) return loss.sum() / num_boxes # Copied from transformers.models.detr.modeling_detr.sigmoid_focal_loss def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2): """ Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. Args: inputs (`torch.FloatTensor` of arbitrary shape): The predictions for each example. 
targets (`torch.FloatTensor` with the same shape as `inputs`) A tensor storing the binary classification label for each element in the `inputs` (0 for the negative class and 1 for the positive class). alpha (`float`, *optional*, defaults to `0.25`): Optional weighting factor in the range (0,1) to balance positive vs. negative examples. gamma (`int`, *optional*, defaults to `2`): Exponent of the modulating factor (1 - p_t) to balance easy vs hard examples. Returns: Loss tensor """ prob = inputs.sigmoid() ce_loss = nn.functional.binary_cross_entropy_with_logits(inputs, targets, reduction="none") # add modulating factor p_t = prob * targets + (1 - prob) * (1 - targets) loss = ce_loss * ((1 - p_t) ** gamma) if alpha >= 0: alpha_t = alpha * targets + (1 - alpha) * (1 - targets) loss = alpha_t * loss return loss.mean(1).sum() / num_boxes class ConditionalDetrLoss(nn.Module): """ This class computes the losses for ConditionalDetrForObjectDetection/ConditionalDetrForSegmentation. The process happens in two steps: 1) we compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of matched ground-truth / prediction (supervise class and box). Args: matcher (`ConditionalDetrHungarianMatcher`): Module able to compute a matching between targets and proposals. num_classes (`int`): Number of object categories, omitting the special no-object category. focal_alpha (`float`): Alpha parameter in focal loss. losses (`List[str]`): List of all the losses to be applied. See `get_loss` for a list of all available losses. """ # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss.__init__ def __init__(self, matcher, num_classes, focal_alpha, losses): super().__init__() self.matcher = matcher self.num_classes = num_classes self.focal_alpha = focal_alpha self.losses = losses # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss.loss_labels def loss_labels(self, outputs, targets, indices, num_boxes): """ Classification loss (Binary focal loss) targets dicts must contain the key "class_labels" containing a tensor of dim [nb_target_boxes] """ if "logits" not in outputs: raise KeyError("No logits were found in the outputs") source_logits = outputs["logits"] idx = self._get_source_permutation_idx(indices) target_classes_o = torch.cat([t["class_labels"][J] for t, (_, J) in zip(targets, indices)]) target_classes = torch.full( source_logits.shape[:2], self.num_classes, dtype=torch.int64, device=source_logits.device ) target_classes[idx] = target_classes_o target_classes_onehot = torch.zeros( [source_logits.shape[0], source_logits.shape[1], source_logits.shape[2] + 1], dtype=source_logits.dtype, layout=source_logits.layout, device=source_logits.device, ) target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1) target_classes_onehot = target_classes_onehot[:, :, :-1] loss_ce = ( sigmoid_focal_loss(source_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) * source_logits.shape[1] ) losses = {"loss_ce": loss_ce} return losses @torch.no_grad() # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss.loss_cardinality def loss_cardinality(self, outputs, targets, indices, num_boxes): """ Compute the cardinality error, i.e. the absolute error in the number of predicted non-empty boxes. This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients. 
""" logits = outputs["logits"] device = logits.device target_lengths = torch.as_tensor([len(v["class_labels"]) for v in targets], device=device) # Count the number of predictions that are NOT "no-object" (which is the last class) card_pred = (logits.argmax(-1) != logits.shape[-1] - 1).sum(1) card_err = nn.functional.l1_loss(card_pred.float(), target_lengths.float()) losses = {"cardinality_error": card_err} return losses # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss.loss_boxes def loss_boxes(self, outputs, targets, indices, num_boxes): """ Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss. Targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]. The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size. """ if "pred_boxes" not in outputs: raise KeyError("No predicted boxes found in outputs") idx = self._get_source_permutation_idx(indices) source_boxes = outputs["pred_boxes"][idx] target_boxes = torch.cat([t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0) loss_bbox = nn.functional.l1_loss(source_boxes, target_boxes, reduction="none") losses = {} losses["loss_bbox"] = loss_bbox.sum() / num_boxes loss_giou = 1 - torch.diag( generalized_box_iou(center_to_corners_format(source_boxes), center_to_corners_format(target_boxes)) ) losses["loss_giou"] = loss_giou.sum() / num_boxes return losses # Copied from transformers.models.detr.modeling_detr.DetrLoss.loss_masks def loss_masks(self, outputs, targets, indices, num_boxes): """ Compute the losses related to the masks: the focal loss and the dice loss. Targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]. 
""" if "pred_masks" not in outputs: raise KeyError("No predicted masks found in outputs") source_idx = self._get_source_permutation_idx(indices) target_idx = self._get_target_permutation_idx(indices) source_masks = outputs["pred_masks"] source_masks = source_masks[source_idx] masks = [t["masks"] for t in targets] # TODO use valid to mask invalid areas due to padding in loss target_masks, valid = nested_tensor_from_tensor_list(masks).decompose() target_masks = target_masks.to(source_masks) target_masks = target_masks[target_idx] # upsample predictions to the target size source_masks = nn.functional.interpolate( source_masks[:, None], size=target_masks.shape[-2:], mode="bilinear", align_corners=False ) source_masks = source_masks[:, 0].flatten(1) target_masks = target_masks.flatten(1) target_masks = target_masks.view(source_masks.shape) losses = { "loss_mask": sigmoid_focal_loss(source_masks, target_masks, num_boxes), "loss_dice": dice_loss(source_masks, target_masks, num_boxes), } return losses # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss._get_source_permutation_idx def _get_source_permutation_idx(self, indices): # permute predictions following indices batch_idx = torch.cat([torch.full_like(source, i) for i, (source, _) in enumerate(indices)]) source_idx = torch.cat([source for (source, _) in indices]) return batch_idx, source_idx # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss._get_target_permutation_idx def _get_target_permutation_idx(self, indices): # permute targets following indices batch_idx = torch.cat([torch.full_like(target, i) for i, (_, target) in enumerate(indices)]) target_idx = torch.cat([target for (_, target) in indices]) return batch_idx, target_idx # Copied from transformers.models.detr.modeling_detr.DetrLoss.get_loss def get_loss(self, loss, outputs, targets, indices, num_boxes): loss_map = { "labels": self.loss_labels, "cardinality": self.loss_cardinality, "boxes": self.loss_boxes, "masks": self.loss_masks, } if loss not in loss_map: raise ValueError(f"Loss {loss} not supported") return loss_map[loss](outputs, targets, indices, num_boxes) # Copied from transformers.models.detr.modeling_detr.DetrLoss.forward def forward(self, outputs, targets): """ This performs the loss computation. Args: outputs (`dict`, *optional*): Dictionary of tensors, see the output specification of the model for the format. targets (`List[dict]`, *optional*): List of dicts, such that `len(targets) == batch_size`. The expected keys in each dict depends on the losses applied, see each loss' doc. 
""" outputs_without_aux = {k: v for k, v in outputs.items() if k != "auxiliary_outputs"} # Retrieve the matching between the outputs of the last layer and the targets indices = self.matcher(outputs_without_aux, targets) # Compute the average number of target boxes across all nodes, for normalization purposes num_boxes = sum(len(t["class_labels"]) for t in targets) num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device) world_size = 1 if is_accelerate_available(): if PartialState._shared_state != {}: num_boxes = reduce(num_boxes) world_size = PartialState().num_processes num_boxes = torch.clamp(num_boxes / world_size, min=1).item() # Compute all the requested losses losses = {} for loss in self.losses: losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes)) # In case of auxiliary losses, we repeat this process with the output of each intermediate layer. if "auxiliary_outputs" in outputs: for i, auxiliary_outputs in enumerate(outputs["auxiliary_outputs"]): indices = self.matcher(auxiliary_outputs, targets) for loss in self.losses: if loss == "masks": # Intermediate masks losses are too costly to compute, we ignore them. continue l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes) l_dict = {k + f"_{i}": v for k, v in l_dict.items()} losses.update(l_dict) return losses # Copied from transformers.models.detr.modeling_detr.DetrMLPPredictionHead with Detr->ConditionalDetr class ConditionalDetrMLPPredictionHead(nn.Module): """ Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates, height and width of a bounding box w.r.t. an image. Copied from https://github.com/facebookresearch/detr/blob/master/models/detr.py """ def __init__(self, input_dim, hidden_dim, output_dim, num_layers): super().__init__() self.num_layers = num_layers h = [hidden_dim] * (num_layers - 1) self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) def forward(self, x): for i, layer in enumerate(self.layers): x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x) return x # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrHungarianMatcher with DeformableDetr->ConditionalDetr class ConditionalDetrHungarianMatcher(nn.Module): """ This class computes an assignment between the targets and the predictions of the network. For efficiency reasons, the targets don't include the no_object. Because of this, in general, there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, while the others are un-matched (and thus treated as non-objects). Args: class_cost: The relative weight of the classification error in the matching cost. bbox_cost: The relative weight of the L1 error of the bounding box coordinates in the matching cost. giou_cost: The relative weight of the giou loss of the bounding box in the matching cost. 
""" def __init__(self, class_cost: float = 1, bbox_cost: float = 1, giou_cost: float = 1): super().__init__() requires_backends(self, ["scipy"]) self.class_cost = class_cost self.bbox_cost = bbox_cost self.giou_cost = giou_cost if class_cost == 0 and bbox_cost == 0 and giou_cost == 0: raise ValueError("All costs of the Matcher can't be 0") @torch.no_grad() def forward(self, outputs, targets): """ Args: outputs (`dict`): A dictionary that contains at least these entries: * "logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits * "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates. targets (`List[dict]`): A list of targets (len(targets) = batch_size), where each target is a dict containing: * "class_labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth objects in the target) containing the class labels * "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates. Returns: `List[Tuple]`: A list of size `batch_size`, containing tuples of (index_i, index_j) where: - index_i is the indices of the selected predictions (in order) - index_j is the indices of the corresponding selected targets (in order) For each batch element, it holds: len(index_i) = len(index_j) = min(num_queries, num_target_boxes) """ batch_size, num_queries = outputs["logits"].shape[:2] # We flatten to compute the cost matrices in a batch out_prob = outputs["logits"].flatten(0, 1).sigmoid() # [batch_size * num_queries, num_classes] out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4] # Also concat the target labels and boxes target_ids = torch.cat([v["class_labels"] for v in targets]) target_bbox = torch.cat([v["boxes"] for v in targets]) # Compute the classification cost. alpha = 0.25 gamma = 2.0 neg_cost_class = (1 - alpha) * (out_prob**gamma) * (-(1 - out_prob + 1e-8).log()) pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log()) class_cost = pos_cost_class[:, target_ids] - neg_cost_class[:, target_ids] # Compute the L1 cost between boxes bbox_cost = torch.cdist(out_bbox, target_bbox, p=1) # Compute the giou cost between boxes giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox)) # Final cost matrix cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu() sizes = [len(v["boxes"]) for v in targets] indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))] return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices] # Copied from transformers.models.detr.modeling_detr._upcast def _upcast(t: Tensor) -> Tensor: # Protects from numerical overflows in multiplications by upcasting to the equivalent higher type if t.is_floating_point(): return t if t.dtype in (torch.float32, torch.float64) else t.float() else: return t if t.dtype in (torch.int32, torch.int64) else t.int() # Copied from transformers.models.detr.modeling_detr.box_area def box_area(boxes: Tensor) -> Tensor: """ Computes the area of a set of bounding boxes, which are specified by its (x1, y1, x2, y2) coordinates. Args: boxes (`torch.FloatTensor` of shape `(number_of_boxes, 4)`): Boxes for which the area will be computed. They are expected to be in (x1, y1, x2, y2) format with `0 <= x1 < x2` and `0 <= y1 < y2`. 
Returns: `torch.FloatTensor`: a tensor containing the area for each box. """ boxes = _upcast(boxes) return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) # Copied from transformers.models.detr.modeling_detr.box_iou def box_iou(boxes1, boxes2): area1 = box_area(boxes1) area2 = box_area(boxes2) left_top = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2] right_bottom = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2] width_height = (right_bottom - left_top).clamp(min=0) # [N,M,2] inter = width_height[:, :, 0] * width_height[:, :, 1] # [N,M] union = area1[:, None] + area2 - inter iou = inter / union return iou, union # Copied from transformers.models.detr.modeling_detr.generalized_box_iou def generalized_box_iou(boxes1, boxes2): """ Generalized IoU from https://giou.stanford.edu/. The boxes should be in [x0, y0, x1, y1] (corner) format. Returns: `torch.FloatTensor`: a [N, M] pairwise matrix, where N = len(boxes1) and M = len(boxes2) """ # degenerate boxes gives inf / nan results # so do an early check if not (boxes1[:, 2:] >= boxes1[:, :2]).all(): raise ValueError(f"boxes1 must be in [x0, y0, x1, y1] (corner) format, but got {boxes1}") if not (boxes2[:, 2:] >= boxes2[:, :2]).all(): raise ValueError(f"boxes2 must be in [x0, y0, x1, y1] (corner) format, but got {boxes2}") iou, union = box_iou(boxes1, boxes2) top_left = torch.min(boxes1[:, None, :2], boxes2[:, :2]) bottom_right = torch.max(boxes1[:, None, 2:], boxes2[:, 2:]) width_height = (bottom_right - top_left).clamp(min=0) # [N,M,2] area = width_height[:, :, 0] * width_height[:, :, 1] return iou - (area - union) / area # Copied from transformers.models.detr.modeling_detr._max_by_axis def _max_by_axis(the_list): # type: (List[List[int]]) -> List[int] maxes = the_list[0] for sublist in the_list[1:]: for index, item in enumerate(sublist): maxes[index] = max(maxes[index], item) return maxes # Copied from transformers.models.detr.modeling_detr.NestedTensor class NestedTensor(object): def __init__(self, tensors, mask: Optional[Tensor]): self.tensors = tensors self.mask = mask def to(self, device): cast_tensor = self.tensors.to(device) mask = self.mask if mask is not None: cast_mask = mask.to(device) else: cast_mask = None return NestedTensor(cast_tensor, cast_mask) def decompose(self): return self.tensors, self.mask def __repr__(self): return str(self.tensors) # Copied from transformers.models.detr.modeling_detr.nested_tensor_from_tensor_list def nested_tensor_from_tensor_list(tensor_list: List[Tensor]): if tensor_list[0].ndim == 3: max_size = _max_by_axis([list(img.shape) for img in tensor_list]) batch_shape = [len(tensor_list)] + max_size batch_size, num_channels, height, width = batch_shape dtype = tensor_list[0].dtype device = tensor_list[0].device tensor = torch.zeros(batch_shape, dtype=dtype, device=device) mask = torch.ones((batch_size, height, width), dtype=torch.bool, device=device) for img, pad_img, m in zip(tensor_list, tensor, mask): pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) m[: img.shape[1], : img.shape[2]] = False else: raise ValueError("Only 3-dimensional tensors are supported") return NestedTensor(tensor, mask)
transformers/src/transformers/models/conditional_detr/modeling_conditional_detr.py/0
{ "file_path": "transformers/src/transformers/models/conditional_detr/modeling_conditional_detr.py", "repo_id": "transformers", "token_count": 55572 }
330
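The Hungarian matcher and box utilities in the row above (`generalized_box_iou`, `box_iou`, `ConditionalDetrHungarianMatcher`) are easiest to follow on a tiny standalone input. The sketch below is illustrative only and not part of the dataset row: it re-derives the pairwise GIoU term and runs `scipy.optimize.linear_sum_assignment` on an L1 + GIoU cost matrix for three predicted boxes and two targets, mirroring the per-image matching step. The box values and the 5/2 cost weights are made-up examples, and the classification cost term is omitted for brevity.

```python
# Minimal, self-contained sketch of the L1 + GIoU matching cost (illustrative values).
import torch
from scipy.optimize import linear_sum_assignment


def box_area(boxes):
    # boxes in (x0, y0, x1, y1) corner format
    return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])


def generalized_box_iou(boxes1, boxes2):
    # pairwise GIoU between every box in boxes1 and every box in boxes2
    area1, area2 = box_area(boxes1), box_area(boxes2)
    lt = torch.max(boxes1[:, None, :2], boxes2[:, :2])
    rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])
    wh = (rb - lt).clamp(min=0)
    inter = wh[..., 0] * wh[..., 1]
    union = area1[:, None] + area2 - inter
    iou = inter / union
    # smallest box enclosing each pair
    lt_enc = torch.min(boxes1[:, None, :2], boxes2[:, :2])
    rb_enc = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
    wh_enc = (rb_enc - lt_enc).clamp(min=0)
    area_enc = wh_enc[..., 0] * wh_enc[..., 1]
    return iou - (area_enc - union) / area_enc


# 3 predicted boxes vs. 2 ground-truth boxes, already in corner format (made-up values)
pred = torch.tensor([[0.0, 0.0, 1.0, 1.0], [0.5, 0.5, 2.0, 2.0], [3.0, 3.0, 4.0, 4.0]])
target = torch.tensor([[0.1, 0.1, 1.1, 1.1], [2.9, 2.9, 4.1, 4.1]])

bbox_cost = torch.cdist(pred, target, p=1)       # L1 cost, shape (3, 2)
giou_cost = -generalized_box_iou(pred, target)   # negated so that lower is better
cost = 5.0 * bbox_cost + 2.0 * giou_cost         # illustrative weights
row_idx, col_idx = linear_sum_assignment(cost.numpy())
print(list(zip(row_idx.tolist(), col_idx.tolist())))  # [(0, 0), (2, 1)]
```

Each prediction is matched to at most one target, exactly as in the matcher's per-image `linear_sum_assignment` call; the real loss then only supervises the matched pairs.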
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) _import_structure = { "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"], "tokenization_deberta": ["DebertaTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_deberta"] = [ "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "DebertaForMaskedLM", "DebertaForQuestionAnswering", "DebertaForSequenceClassification", "DebertaForTokenClassification", "DebertaModel", "DebertaPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_tf_deberta"] = [ "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "TFDebertaForMaskedLM", "TFDebertaForQuestionAnswering", "TFDebertaForSequenceClassification", "TFDebertaForTokenClassification", "TFDebertaModel", "TFDebertaPreTrainedModel", ] if TYPE_CHECKING: from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig from .tokenization_deberta import DebertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_deberta_fast import DebertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deberta import ( DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, DebertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deberta import ( TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFDebertaForMaskedLM, TFDebertaForQuestionAnswering, TFDebertaForSequenceClassification, TFDebertaForTokenClassification, TFDebertaModel, TFDebertaPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
transformers/src/transformers/models/deberta/__init__.py/0
{ "file_path": "transformers/src/transformers/models/deberta/__init__.py", "repo_id": "transformers", "token_count": 1512 }
331
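The row above shows the lazy-import plumbing used by the library: `_import_structure` maps each submodule to the public names it defines, and the module object is replaced by a `_LazyModule`, so importing the package does not pull in torch or TensorFlow until a symbol is actually touched. The sketch below is a simplified, illustrative stand-in for that idea, not the real `_LazyModule`: it swaps in a PEP 562 module-level `__getattr__` instead of the `ModuleType` subclass the library actually installs, and it only works when placed inside a package's `__init__.py`.

```python
# Illustrative sketch of lazy re-exports for a package __init__.py.
# Assumption: simplified stand-in, not the actual transformers._LazyModule.
import importlib

# submodule -> public names it defines (same shape as the _import_structure above)
_import_structure = {
    "configuration_deberta": ["DebertaConfig"],
    "tokenization_deberta": ["DebertaTokenizer"],
}

# reverse map: public name -> submodule that defines it
_name_to_module = {name: module for module, names in _import_structure.items() for name in names}


def __getattr__(name):  # PEP 562: called only when `name` is not found normally
    if name in _name_to_module:
        submodule = importlib.import_module(f".{_name_to_module[name]}", __name__)
        value = getattr(submodule, name)
        globals()[name] = value  # cache so later lookups bypass __getattr__
        return value
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")


def __dir__():
    return sorted(set(globals()) | set(_name_to_module))
```

With something like this in place, `from mypackage import DebertaConfig` imports `configuration_deberta` only at that moment, which is the same effect the `sys.modules[__name__] = _LazyModule(...)` line at the bottom of the row achieves.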
# coding=utf-8 # Copyright 2022 SenseTime and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Deformable DETR model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING logger = logging.get_logger(__name__) DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = { "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json", # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr } class DeformableDetrConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`DeformableDetrModel`]. It is used to instantiate a Deformable DETR model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Deformable DETR [SenseTime/deformable-detr](https://huggingface.co/SenseTime/deformable-detr) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: use_timm_backbone (`bool`, *optional*, defaults to `True`): Whether or not to use the `timm` library for the backbone. If set to `False`, will use the [`AutoBackbone`] API. backbone_config (`PretrainedConfig` or `dict`, *optional*): The configuration of the backbone model. Only used in case `use_timm_backbone` is set to `False` in which case it will default to `ResNetConfig()`. num_channels (`int`, *optional*, defaults to 3): The number of input channels. num_queries (`int`, *optional*, defaults to 300): Number of object queries, i.e. detection slots. This is the maximal number of objects [`DeformableDetrModel`] can detect in a single image. In case `two_stage` is set to `True`, we use `two_stage_num_proposals` instead. d_model (`int`, *optional*, defaults to 256): Dimension of the layers. encoder_layers (`int`, *optional*, defaults to 6): Number of encoder layers. decoder_layers (`int`, *optional*, defaults to 6): Number of decoder layers. encoder_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. decoder_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (`int`, *optional*, defaults to 1024): Dimension of the "intermediate" (often named feed-forward) layer in decoder. encoder_ffn_dim (`int`, *optional*, defaults to 1024): Dimension of the "intermediate" (often named feed-forward) layer in decoder. activation_function (`str` or `function`, *optional*, defaults to `"relu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. 
dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. init_xavier_std (`float`, *optional*, defaults to 1): The scaling factor used for the Xavier initialization gain in the HM Attention map module. encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. auxiliary_loss (`bool`, *optional*, defaults to `False`): Whether auxiliary decoding losses (loss at each decoder layer) are to be used. position_embedding_type (`str`, *optional*, defaults to `"sine"`): Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`. backbone (`str`, *optional*, defaults to `"resnet50"`): Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. use_pretrained_backbone (`bool`, *optional*, defaults to `True`): Whether to use pretrained weights for the backbone. backbone_kwargs (`dict`, *optional*): Keyword arguments to be passed to AutoBackbone when loading from a checkpoint e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. dilation (`bool`, *optional*, defaults to `False`): Whether to replace stride with dilation in the last convolutional block (DC5). Only supported when `use_timm_backbone` = `True`. class_cost (`float`, *optional*, defaults to 1): Relative weight of the classification error in the Hungarian matching cost. bbox_cost (`float`, *optional*, defaults to 5): Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost. giou_cost (`float`, *optional*, defaults to 2): Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost. mask_loss_coefficient (`float`, *optional*, defaults to 1): Relative weight of the Focal loss in the panoptic segmentation loss. dice_loss_coefficient (`float`, *optional*, defaults to 1): Relative weight of the DICE/F-1 loss in the panoptic segmentation loss. bbox_loss_coefficient (`float`, *optional*, defaults to 5): Relative weight of the L1 bounding box loss in the object detection loss. giou_loss_coefficient (`float`, *optional*, defaults to 2): Relative weight of the generalized IoU loss in the object detection loss. eos_coefficient (`float`, *optional*, defaults to 0.1): Relative classification weight of the 'no-object' class in the object detection loss. num_feature_levels (`int`, *optional*, defaults to 4): The number of input feature levels. encoder_n_points (`int`, *optional*, defaults to 4): The number of sampled keys in each feature level for each attention head in the encoder. decoder_n_points (`int`, *optional*, defaults to 4): The number of sampled keys in each feature level for each attention head in the decoder. 
two_stage (`bool`, *optional*, defaults to `False`): Whether to apply a two-stage deformable DETR, where the region proposals are also generated by a variant of Deformable DETR, which are further fed into the decoder for iterative bounding box refinement. two_stage_num_proposals (`int`, *optional*, defaults to 300): The number of region proposals to be generated, in case `two_stage` is set to `True`. with_box_refine (`bool`, *optional*, defaults to `False`): Whether to apply iterative bounding box refinement, where each decoder layer refines the bounding boxes based on the predictions from the previous layer. focal_alpha (`float`, *optional*, defaults to 0.25): Alpha parameter in the focal loss. disable_custom_kernels (`bool`, *optional*, defaults to `False`): Disable the use of custom CUDA and CPU kernels. This option is necessary for the ONNX export, as custom kernels are not supported by PyTorch ONNX export. Examples: ```python >>> from transformers import DeformableDetrConfig, DeformableDetrModel >>> # Initializing a Deformable DETR SenseTime/deformable-detr style configuration >>> configuration = DeformableDetrConfig() >>> # Initializing a model (with random weights) from the SenseTime/deformable-detr style configuration >>> model = DeformableDetrModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "deformable_detr" attribute_map = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300, max_position_embeddings=1024, encoder_layers=6, encoder_ffn_dim=1024, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, backbone_kwargs=None, dilation=False, num_feature_levels=4, encoder_n_points=4, decoder_n_points=4, two_stage=False, two_stage_num_proposals=300, with_box_refine=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, disable_custom_kernels=False, **kwargs, ): if not use_timm_backbone and use_pretrained_backbone: raise ValueError( "Loading pretrained backbone weights from the transformers library is not supported yet. `use_timm_backbone` must be set to `True` when `use_pretrained_backbone=True`" ) if backbone_config is not None and backbone is not None: raise ValueError("You can't specify both `backbone` and `backbone_config`.") if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.") if backbone_kwargs is not None and backbone_kwargs and backbone_config is not None: raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.") if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.") backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"]) elif isinstance(backbone_config, dict): backbone_model_type = backbone_config.get("model_type") config_class = CONFIG_MAPPING[backbone_model_type] backbone_config = config_class.from_dict(backbone_config) self.use_timm_backbone = use_timm_backbone self.backbone_config = backbone_config self.num_channels = num_channels self.num_queries = num_queries self.max_position_embeddings = max_position_embeddings self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.init_xavier_std = init_xavier_std self.encoder_layerdrop = encoder_layerdrop self.auxiliary_loss = auxiliary_loss self.position_embedding_type = position_embedding_type self.backbone = backbone self.use_pretrained_backbone = use_pretrained_backbone self.backbone_kwargs = backbone_kwargs self.dilation = dilation # deformable attributes self.num_feature_levels = num_feature_levels self.encoder_n_points = encoder_n_points self.decoder_n_points = decoder_n_points self.two_stage = two_stage self.two_stage_num_proposals = two_stage_num_proposals self.with_box_refine = with_box_refine if two_stage is True and with_box_refine is False: raise ValueError("If two_stage is True, with_box_refine must be True.") # Hungarian matcher self.class_cost = class_cost self.bbox_cost = bbox_cost self.giou_cost = giou_cost # Loss coefficients self.mask_loss_coefficient = mask_loss_coefficient self.dice_loss_coefficient = dice_loss_coefficient self.bbox_loss_coefficient = bbox_loss_coefficient self.giou_loss_coefficient = giou_loss_coefficient self.eos_coefficient = eos_coefficient self.focal_alpha = focal_alpha self.disable_custom_kernels = disable_custom_kernels super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs) @property def num_attention_heads(self) -> int: return self.encoder_attention_heads @property def hidden_size(self) -> int: return self.d_model
transformers/src/transformers/models/deformable_detr/configuration_deformable_detr.py/0
{ "file_path": "transformers/src/transformers/models/deformable_detr/configuration_deformable_detr.py", "repo_id": "transformers", "token_count": 5659 }
332
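The entry above is only the configuration class, so here is a short sketch of how its validation logic behaves. It exercises just the checks visible in `__init__` (two-stage decoding requires box refinement, and a non-timm backbone requires `use_pretrained_backbone=False` plus an explicit `backbone=None`); the `ResNetConfig` import and the `out_features=["stage4"]` value mirror the file's own default, but the snippet is an illustration of those checks, not code taken from the file.

```python
# Minimal sketch, assuming the public `transformers` package exposes
# DeformableDetrConfig and ResNetConfig as in the docstring example above.
from transformers import DeformableDetrConfig, ResNetConfig

# Defaults: timm ResNet-50 backbone, single-stage decoding.
config = DeformableDetrConfig()
assert config.two_stage is False and config.with_box_refine is False

# Two-stage variants must also enable box refinement, otherwise
# __init__ raises a ValueError.
config = DeformableDetrConfig(two_stage=True, with_box_refine=True)

# Using a transformers backbone instead of timm requires turning off both
# `use_timm_backbone` and `use_pretrained_backbone`; `backbone=None` avoids
# the `backbone`/`backbone_config` exclusivity check (the default is "resnet50").
backbone_config = ResNetConfig(out_features=["stage4"])
config = DeformableDetrConfig(
    use_timm_backbone=False,
    use_pretrained_backbone=False,
    backbone=None,
    backbone_config=backbone_config,
)
```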
# coding=utf-8 # Copyright 2022 BNRist (Tsinghua University), TKLNDST (Nankai University) and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert VAN checkpoints from the original repository. URL: https://github.com/Visual-Attention-Network/VAN-Classification""" import argparse import json import sys from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import torch import torch.nn as nn from huggingface_hub import cached_download, hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, VanConfig, VanForImageClassification from transformers.models.deprecated.van.modeling_van import VanLayerScaling from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) @dataclass class Tracker: module: nn.Module traced: List[nn.Module] = field(default_factory=list) handles: list = field(default_factory=list) def _forward_hook(self, m, inputs: Tensor, outputs: Tensor): has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d) if has_not_submodules: if not isinstance(m, VanLayerScaling): self.traced.append(m) def __call__(self, x: Tensor): for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook)) self.module(x) [x.remove() for x in self.handles] return self @property def parametrized(self): # check the len of the state_dict keys to see if we have learnable params return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced)) @dataclass class ModuleTransfer: src: nn.Module dest: nn.Module verbose: int = 0 src_skip: List = field(default_factory=list) dest_skip: List = field(default_factory=list) def __call__(self, x: Tensor): """ Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input. Under the hood we tracked all the operations in both modules. """ dest_traced = Tracker(self.dest)(x).parametrized src_traced = Tracker(self.src)(x).parametrized src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced)) dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced)) if len(dest_traced) != len(src_traced): raise Exception( f"Numbers of operations are different. Source module has {len(src_traced)} operations while" f" destination module has {len(dest_traced)}." 
) for dest_m, src_m in zip(dest_traced, src_traced): dest_m.load_state_dict(src_m.state_dict()) if self.verbose == 1: print(f"Transfered from={src_m} to={dest_m}") def copy_parameters(from_model: nn.Module, our_model: nn.Module) -> nn.Module: # nn.Parameter cannot be tracked by the Tracker, thus we need to manually convert them from_state_dict = from_model.state_dict() our_state_dict = our_model.state_dict() config = our_model.config all_keys = [] for stage_idx in range(len(config.hidden_sizes)): for block_id in range(config.depths[stage_idx]): from_key = f"block{stage_idx + 1}.{block_id}.layer_scale_1" to_key = f"van.encoder.stages.{stage_idx}.layers.{block_id}.attention_scaling.weight" all_keys.append((from_key, to_key)) from_key = f"block{stage_idx + 1}.{block_id}.layer_scale_2" to_key = f"van.encoder.stages.{stage_idx}.layers.{block_id}.mlp_scaling.weight" all_keys.append((from_key, to_key)) for from_key, to_key in all_keys: our_state_dict[to_key] = from_state_dict.pop(from_key) our_model.load_state_dict(our_state_dict) return our_model def convert_weight_and_push( name: str, config: VanConfig, checkpoint: str, from_model: nn.Module, save_directory: Path, push_to_hub: bool = True, ): print(f"Downloading weights for {name}...") checkpoint_path = cached_download(checkpoint) print(f"Converting {name}...") from_state_dict = torch.load(checkpoint_path)["state_dict"] from_model.load_state_dict(from_state_dict) from_model.eval() with torch.no_grad(): our_model = VanForImageClassification(config).eval() module_transfer = ModuleTransfer(src=from_model, dest=our_model) x = torch.randn((1, 3, 224, 224)) module_transfer(x) our_model = copy_parameters(from_model, our_model) if not torch.allclose(from_model(x), our_model(x).logits): raise ValueError("The model logits don't match the original one.") checkpoint_name = name print(checkpoint_name) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name, commit_message="Add model", use_temp_dir=True, ) # we can use the convnext one image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k") image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name, commit_message="Add image processor", use_temp_dir=True, ) print(f"Pushed {checkpoint_name}") def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True): filename = "imagenet-1k-id2label.json" num_labels = 1000 repo_id = "huggingface/label-files" num_labels = num_labels id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} id2label = id2label label2id = {v: k for k, v in id2label.items()} ImageNetPreTrainedConfig = partial(VanConfig, num_labels=num_labels, id2label=id2label, label2id=label2id) names_to_config = { "van-tiny": ImageNetPreTrainedConfig( hidden_sizes=[32, 64, 160, 256], depths=[3, 3, 5, 2], mlp_ratios=[8, 8, 4, 4], ), "van-small": ImageNetPreTrainedConfig( hidden_sizes=[64, 128, 320, 512], depths=[2, 2, 4, 2], mlp_ratios=[8, 8, 4, 4], ), "van-base": ImageNetPreTrainedConfig( hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3], mlp_ratios=[8, 8, 4, 4], ), "van-large": ImageNetPreTrainedConfig( hidden_sizes=[64, 128, 320, 512], depths=[3, 5, 27, 3], mlp_ratios=[8, 8, 4, 4], ), } names_to_original_models = { "van-tiny": van_tiny, "van-small": van_small, "van-base": van_base, "van-large": van_large, } names_to_original_checkpoints = { "van-tiny": ( 
"https://huggingface.co/Visual-Attention-Network/VAN-Tiny-original/resolve/main/van_tiny_754.pth.tar" ), "van-small": ( "https://huggingface.co/Visual-Attention-Network/VAN-Small-original/resolve/main/van_small_811.pth.tar" ), "van-base": ( "https://huggingface.co/Visual-Attention-Network/VAN-Base-original/resolve/main/van_base_828.pth.tar" ), "van-large": ( "https://huggingface.co/Visual-Attention-Network/VAN-Large-original/resolve/main/van_large_839.pth.tar" ), } if model_name: convert_weight_and_push( model_name, names_to_config[model_name], checkpoint=names_to_original_checkpoints[model_name], from_model=names_to_original_models[model_name](), save_directory=save_directory, push_to_hub=push_to_hub, ) else: for model_name, config in names_to_config.items(): convert_weight_and_push( model_name, config, checkpoint=names_to_original_checkpoints[model_name], from_model=names_to_original_models[model_name](), save_directory=save_directory, push_to_hub=push_to_hub, ) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model-name", default=None, type=str, help=( "The name of the model you wish to convert, it must be one of the supported resnet* architecture," " currently: van-tiny/small/base/large. If `None`, all of them will the converted." ), ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=Path, required=True, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--van_dir", required=True, type=Path, help=( "A path to VAN's original implementation directory. You can download from here:" " https://github.com/Visual-Attention-Network/VAN-Classification" ), ) parser.add_argument( "--push_to_hub", default=True, type=bool, required=False, help="If True, push model and image processor to the hub.", ) args = parser.parse_args() pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) van_dir = args.van_dir # append the path to the parents to maskformer dir sys.path.append(str(van_dir.parent)) from van.models.van import van_base, van_large, van_small, van_tiny convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
transformers/src/transformers/models/deprecated/van/convert_van_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/deprecated/van/convert_van_to_pytorch.py", "repo_id": "transformers", "token_count": 4514 }
333
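The conversion script above leans on two helpers: `Tracker`, which uses forward hooks to record the parametrized leaf modules hit during a forward pass, and `ModuleTransfer`, which zips the two traces together and copies state dicts pairwise. The toy sketch below reproduces that idea in isolation with made-up `nn.Sequential` models (nothing here is VAN-specific); it only illustrates why the trick works when two models are layer-for-layer equivalent but nested differently.

```python
import torch
import torch.nn as nn


def trace_leaves(model: nn.Module, x: torch.Tensor):
    """Return the parametrized leaf modules in the order they run."""
    traced, handles = [], []

    def hook(module, inputs, outputs):
        # Keep only leaf modules that actually own parameters.
        if len(list(module.children())) == 0 and len(module.state_dict()) > 0:
            traced.append(module)

    for m in model.modules():
        handles.append(m.register_forward_hook(hook))
    model(x)
    for h in handles:
        h.remove()
    return traced


# Two layer-for-layer equivalent toy models with different nesting.
src = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.Conv2d(8, 4, 3))
dst = nn.Sequential(nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU()), nn.Conv2d(8, 4, 3))

x = torch.randn(1, 3, 32, 32)
for d, s in zip(trace_leaves(dst, x), trace_leaves(src, x)):
    d.load_state_dict(s.state_dict())

# After the transfer both models compute the same function.
assert torch.allclose(src(x), dst(x))
```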
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TF 2.0 DistilBERT model """ from __future__ import annotations import warnings from typing import Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import ( TFBaseModelOutput, TFMaskedLMOutput, TFMultipleChoiceModelOutput, TFQuestionAnsweringModelOutput, TFSequenceClassifierOutput, TFTokenClassifierOutput, ) from ...modeling_tf_utils import ( TFMaskedLanguageModelingLoss, TFModelInputType, TFMultipleChoiceLoss, TFPreTrainedModel, TFQuestionAnsweringLoss, TFSequenceClassificationLoss, TFTokenClassificationLoss, get_initializer, keras, keras_serializable, unpack_inputs, ) from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, ) from .configuration_distilbert import DistilBertConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "distilbert-base-uncased" _CONFIG_FOR_DOC = "DistilBertConfig" TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "distilbert-base-uncased", "distilbert-base-uncased-distilled-squad", "distilbert-base-cased", "distilbert-base-cased-distilled-squad", "distilbert-base-multilingual-cased", "distilbert-base-uncased-finetuned-sst-2-english", # See all DistilBERT models at https://huggingface.co/models?filter=distilbert ] class TFEmbeddings(keras.layers.Layer): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config, **kwargs): super().__init__(**kwargs) self.config = config self.dim = config.dim self.initializer_range = config.initializer_range self.max_position_embeddings = config.max_position_embeddings self.LayerNorm = keras.layers.LayerNormalization(epsilon=1e-12, name="LayerNorm") self.dropout = keras.layers.Dropout(rate=config.dropout) def build(self, input_shape=None): with tf.name_scope("word_embeddings"): self.weight = self.add_weight( name="weight", shape=[self.config.vocab_size, self.dim], initializer=get_initializer(initializer_range=self.initializer_range), ) with tf.name_scope("position_embeddings"): self.position_embeddings = self.add_weight( name="embeddings", shape=[self.max_position_embeddings, self.dim], initializer=get_initializer(initializer_range=self.initializer_range), ) if self.built: return self.built = True if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.dim]) def call(self, input_ids=None, position_ids=None, inputs_embeds=None, training=False): """ Applies embedding based on inputs tensor. Returns: final_embeddings (`tf.Tensor`): output embedding tensor. 
""" assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] if position_ids is None: position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0) position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids) final_embeddings = inputs_embeds + position_embeds final_embeddings = self.LayerNorm(inputs=final_embeddings) final_embeddings = self.dropout(inputs=final_embeddings, training=training) return final_embeddings class TFMultiHeadSelfAttention(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.n_heads = config.n_heads self.dim = config.dim self.dropout = keras.layers.Dropout(config.attention_dropout) self.output_attentions = config.output_attentions assert self.dim % self.n_heads == 0, f"Hidden size {self.dim} not dividable by number of heads {self.n_heads}" self.q_lin = keras.layers.Dense( config.dim, kernel_initializer=get_initializer(config.initializer_range), name="q_lin" ) self.k_lin = keras.layers.Dense( config.dim, kernel_initializer=get_initializer(config.initializer_range), name="k_lin" ) self.v_lin = keras.layers.Dense( config.dim, kernel_initializer=get_initializer(config.initializer_range), name="v_lin" ) self.out_lin = keras.layers.Dense( config.dim, kernel_initializer=get_initializer(config.initializer_range), name="out_lin" ) self.pruned_heads = set() self.config = config def prune_heads(self, heads): raise NotImplementedError def call(self, query, key, value, mask, head_mask, output_attentions, training=False): """ Parameters: query: tf.Tensor(bs, seq_length, dim) key: tf.Tensor(bs, seq_length, dim) value: tf.Tensor(bs, seq_length, dim) mask: tf.Tensor(bs, seq_length) Returns: weights: tf.Tensor(bs, n_heads, seq_length, seq_length) Attention weights context: tf.Tensor(bs, seq_length, dim) Contextualized layer. 
Optional: only if `output_attentions=True` """ bs, q_length, dim = shape_list(query) k_length = shape_list(key)[1] # assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured' # assert key.size() == value.size() dim_per_head = int(self.dim / self.n_heads) dim_per_head = tf.cast(dim_per_head, dtype=tf.int32) mask_reshape = [bs, 1, 1, k_length] def shape(x): """separate heads""" return tf.transpose(tf.reshape(x, (bs, -1, self.n_heads, dim_per_head)), perm=(0, 2, 1, 3)) def unshape(x): """group heads""" return tf.reshape(tf.transpose(x, perm=(0, 2, 1, 3)), (bs, -1, self.n_heads * dim_per_head)) q = shape(self.q_lin(query)) # (bs, n_heads, q_length, dim_per_head) k = shape(self.k_lin(key)) # (bs, n_heads, k_length, dim_per_head) v = shape(self.v_lin(value)) # (bs, n_heads, k_length, dim_per_head) q = tf.cast(q, dtype=tf.float32) q = tf.multiply(q, tf.math.rsqrt(tf.cast(dim_per_head, dtype=tf.float32))) k = tf.cast(k, dtype=q.dtype) scores = tf.matmul(q, k, transpose_b=True) # (bs, n_heads, q_length, k_length) mask = tf.reshape(mask, mask_reshape) # (bs, n_heads, qlen, klen) # scores.masked_fill_(mask, -float('inf')) # (bs, n_heads, q_length, k_length) mask = tf.cast(mask, dtype=scores.dtype) scores = scores - 1e30 * (1.0 - mask) weights = stable_softmax(scores, axis=-1) # (bs, n_heads, qlen, klen) weights = self.dropout(weights, training=training) # (bs, n_heads, qlen, klen) # Mask heads if we want to if head_mask is not None: weights = weights * head_mask context = tf.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head) context = unshape(context) # (bs, q_length, dim) context = self.out_lin(context) # (bs, q_length, dim) if output_attentions: return (context, weights) else: return (context,) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "q_lin", None) is not None: with tf.name_scope(self.q_lin.name): self.q_lin.build([None, None, self.config.dim]) if getattr(self, "k_lin", None) is not None: with tf.name_scope(self.k_lin.name): self.k_lin.build([None, None, self.config.dim]) if getattr(self, "v_lin", None) is not None: with tf.name_scope(self.v_lin.name): self.v_lin.build([None, None, self.config.dim]) if getattr(self, "out_lin", None) is not None: with tf.name_scope(self.out_lin.name): self.out_lin.build([None, None, self.config.dim]) class TFFFN(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dropout = keras.layers.Dropout(config.dropout) self.lin1 = keras.layers.Dense( config.hidden_dim, kernel_initializer=get_initializer(config.initializer_range), name="lin1" ) self.lin2 = keras.layers.Dense( config.dim, kernel_initializer=get_initializer(config.initializer_range), name="lin2" ) self.activation = get_tf_activation(config.activation) self.config = config def call(self, input, training=False): x = self.lin1(input) x = self.activation(x) x = self.lin2(x) x = self.dropout(x, training=training) return x def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "lin1", None) is not None: with tf.name_scope(self.lin1.name): self.lin1.build([None, None, self.config.dim]) if getattr(self, "lin2", None) is not None: with tf.name_scope(self.lin2.name): self.lin2.build([None, None, self.config.hidden_dim]) class TFTransformerBlock(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.n_heads = config.n_heads self.dim = config.dim self.hidden_dim = config.hidden_dim self.dropout = 
keras.layers.Dropout(config.dropout) self.activation = config.activation self.output_attentions = config.output_attentions assert ( config.dim % config.n_heads == 0 ), f"Hidden size {config.dim} not dividable by number of heads {config.n_heads}" self.attention = TFMultiHeadSelfAttention(config, name="attention") self.sa_layer_norm = keras.layers.LayerNormalization(epsilon=1e-12, name="sa_layer_norm") self.ffn = TFFFN(config, name="ffn") self.output_layer_norm = keras.layers.LayerNormalization(epsilon=1e-12, name="output_layer_norm") self.config = config def call(self, x, attn_mask, head_mask, output_attentions, training=False): # removed: src_enc=None, src_len=None """ Parameters: x: tf.Tensor(bs, seq_length, dim) attn_mask: tf.Tensor(bs, seq_length) Outputs: sa_weights: tf.Tensor(bs, n_heads, seq_length, seq_length) The attention weights ffn_output: tf.Tensor(bs, seq_length, dim) The output of the transformer block contextualization. """ # Self-Attention sa_output = self.attention(x, x, x, attn_mask, head_mask, output_attentions, training=training) if output_attentions: sa_output, sa_weights = sa_output # (bs, seq_length, dim), (bs, n_heads, seq_length, seq_length) else: # To handle these `output_attentions` or `output_hidden_states` cases returning tuples # assert type(sa_output) == tuple sa_output = sa_output[0] sa_output = self.sa_layer_norm(sa_output + x) # (bs, seq_length, dim) # Feed Forward Network ffn_output = self.ffn(sa_output, training=training) # (bs, seq_length, dim) ffn_output = self.output_layer_norm(ffn_output + sa_output) # (bs, seq_length, dim) output = (ffn_output,) if output_attentions: output = (sa_weights,) + output return output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "attention", None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, "sa_layer_norm", None) is not None: with tf.name_scope(self.sa_layer_norm.name): self.sa_layer_norm.build([None, None, self.config.dim]) if getattr(self, "ffn", None) is not None: with tf.name_scope(self.ffn.name): self.ffn.build(None) if getattr(self, "output_layer_norm", None) is not None: with tf.name_scope(self.output_layer_norm.name): self.output_layer_norm.build([None, None, self.config.dim]) class TFTransformer(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.n_layers = config.n_layers self.output_hidden_states = config.output_hidden_states self.output_attentions = config.output_attentions self.layer = [TFTransformerBlock(config, name=f"layer_._{i}") for i in range(config.n_layers)] def call(self, x, attn_mask, head_mask, output_attentions, output_hidden_states, return_dict, training=False): # docstyle-ignore """ Parameters: x: tf.Tensor(bs, seq_length, dim) Input sequence embedded. attn_mask: tf.Tensor(bs, seq_length) Attention mask on the sequence. Returns: hidden_state: tf.Tensor(bs, seq_length, dim) Sequence of hidden states in the last (top) layer all_hidden_states: Tuple[tf.Tensor(bs, seq_length, dim)] Tuple of length n_layers with the hidden states from each layer. 
Optional: only if output_hidden_states=True all_attentions: Tuple[tf.Tensor(bs, n_heads, seq_length, seq_length)] Tuple of length n_layers with the attention weights from each layer Optional: only if output_attentions=True """ all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_state = x for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_state,) layer_outputs = layer_module(hidden_state, attn_mask, head_mask[i], output_attentions, training=training) hidden_state = layer_outputs[-1] if output_attentions: assert len(layer_outputs) == 2 attentions = layer_outputs[0] all_attentions = all_attentions + (attentions,) else: assert len(layer_outputs) == 1, f"Incorrect number of outputs {len(layer_outputs)} instead of 1" # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, all_hidden_states, all_attentions] if v is not None) return TFBaseModelOutput( last_hidden_state=hidden_state, hidden_states=all_hidden_states, attentions=all_attentions ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layer", None) is not None: for layer in self.layer: with tf.name_scope(layer.name): layer.build(None) @keras_serializable class TFDistilBertMainLayer(keras.layers.Layer): config_class = DistilBertConfig def __init__(self, config, **kwargs): super().__init__(**kwargs) self.config = config self.num_hidden_layers = config.num_hidden_layers self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.return_dict = config.use_return_dict self.embeddings = TFEmbeddings(config, name="embeddings") # Embeddings self.transformer = TFTransformer(config, name="transformer") # Encoder def get_input_embeddings(self): return self.embeddings def set_input_embeddings(self, value): self.embeddings.weight = value self.embeddings.vocab_size = value.shape[0] def _prune_heads(self, heads_to_prune): raise NotImplementedError @unpack_inputs def call( self, input_ids=None, attention_mask=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if attention_mask is None: attention_mask = tf.ones(input_shape) # (bs, seq_length) attention_mask = tf.cast(attention_mask, dtype=tf.float32) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.num_hidden_layers embedding_output = self.embeddings(input_ids, inputs_embeds=inputs_embeds) # (bs, seq_length, dim) tfmr_output = self.transformer( embedding_output, attention_mask, head_mask, output_attentions, output_hidden_states, return_dict, training=training, ) return tfmr_output # last-layer hidden-state, (all 
hidden_states), (all attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embeddings", None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, "transformer", None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None) # INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL # class TFDistilBertPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = DistilBertConfig base_model_prefix = "distilbert" DISTILBERT_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Parameters: config ([`DistilBertConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ DISTILBERT_INPUTS_DOCSTRING = r""" Args: input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
[What are attention masks?](../glossary#attention-mask) head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ @add_start_docstrings( "The bare DistilBERT encoder/transformer outputting raw hidden-states without any specific head on top.", DISTILBERT_START_DOCSTRING, ) class TFDistilBertModel(TFDistilBertPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.distilbert = TFDistilBertMainLayer(config, name="distilbert") # Embeddings @unpack_inputs @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]: outputs = self.distilbert( input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "distilbert", None) is not None: with tf.name_scope(self.distilbert.name): self.distilbert.build(None) class TFDistilBertLMHead(keras.layers.Layer): def __init__(self, config, input_embeddings, **kwargs): super().__init__(**kwargs) self.config = config self.dim = config.dim # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. 
self.input_embeddings = input_embeddings def build(self, input_shape): self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias") super().build(input_shape) def get_output_embeddings(self): return self.input_embeddings def set_output_embeddings(self, value): self.input_embeddings.weight = value self.input_embeddings.vocab_size = shape_list(value)[0] def get_bias(self): return {"bias": self.bias} def set_bias(self, value): self.bias = value["bias"] self.config.vocab_size = shape_list(value["bias"])[0] def call(self, hidden_states): seq_length = shape_list(tensor=hidden_states)[1] hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.dim]) hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True) hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size]) hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias) return hidden_states @add_start_docstrings( """DistilBert Model with a `masked language modeling` head on top.""", DISTILBERT_START_DOCSTRING, ) class TFDistilBertForMaskedLM(TFDistilBertPreTrainedModel, TFMaskedLanguageModelingLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.config = config self.distilbert = TFDistilBertMainLayer(config, name="distilbert") self.vocab_transform = keras.layers.Dense( config.dim, kernel_initializer=get_initializer(config.initializer_range), name="vocab_transform" ) self.act = get_tf_activation(config.activation) self.vocab_layer_norm = keras.layers.LayerNormalization(epsilon=1e-12, name="vocab_layer_norm") self.vocab_projector = TFDistilBertLMHead(config, self.distilbert.embeddings, name="vocab_projector") def get_lm_head(self): return self.vocab_projector def get_prefix_bias_name(self): warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning) return self.name + "/" + self.vocab_projector.name @unpack_inputs @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. 
Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` """ distilbert_output = self.distilbert( input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) hidden_states = distilbert_output[0] # (bs, seq_length, dim) prediction_logits = self.vocab_transform(hidden_states) # (bs, seq_length, dim) prediction_logits = self.act(prediction_logits) # (bs, seq_length, dim) prediction_logits = self.vocab_layer_norm(prediction_logits) # (bs, seq_length, dim) prediction_logits = self.vocab_projector(prediction_logits) loss = None if labels is None else self.hf_compute_loss(labels, prediction_logits) if not return_dict: output = (prediction_logits,) + distilbert_output[1:] return ((loss,) + output) if loss is not None else output return TFMaskedLMOutput( loss=loss, logits=prediction_logits, hidden_states=distilbert_output.hidden_states, attentions=distilbert_output.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "distilbert", None) is not None: with tf.name_scope(self.distilbert.name): self.distilbert.build(None) if getattr(self, "vocab_transform", None) is not None: with tf.name_scope(self.vocab_transform.name): self.vocab_transform.build([None, None, self.config.dim]) if getattr(self, "vocab_layer_norm", None) is not None: with tf.name_scope(self.vocab_layer_norm.name): self.vocab_layer_norm.build([None, None, self.config.dim]) if getattr(self, "vocab_projector", None) is not None: with tf.name_scope(self.vocab_projector.name): self.vocab_projector.build(None) @add_start_docstrings( """ DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""", DISTILBERT_START_DOCSTRING, ) class TFDistilBertForSequenceClassification(TFDistilBertPreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.distilbert = TFDistilBertMainLayer(config, name="distilbert") self.pre_classifier = keras.layers.Dense( config.dim, kernel_initializer=get_initializer(config.initializer_range), activation="relu", name="pre_classifier", ) self.classifier = keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) self.dropout = keras.layers.Dropout(config.seq_classif_dropout) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ distilbert_output = self.distilbert( input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) hidden_state = distilbert_output[0] # (bs, seq_len, dim) pooled_output = hidden_state[:, 0] # (bs, dim) pooled_output = self.pre_classifier(pooled_output) # (bs, dim) pooled_output = self.dropout(pooled_output, training=training) # (bs, dim) logits = self.classifier(pooled_output) # (bs, dim) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + distilbert_output[1:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=distilbert_output.hidden_states, attentions=distilbert_output.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "distilbert", None) is not None: with tf.name_scope(self.distilbert.name): self.distilbert.build(None) if getattr(self, "pre_classifier", None) is not None: with tf.name_scope(self.pre_classifier.name): self.pre_classifier.build([None, None, self.config.dim]) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.dim]) @add_start_docstrings( """ DistilBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
""", DISTILBERT_START_DOCSTRING, ) class TFDistilBertForTokenClassification(TFDistilBertPreTrainedModel, TFTokenClassificationLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.distilbert = TFDistilBertMainLayer(config, name="distilbert") self.dropout = keras.layers.Dropout(config.dropout) self.classifier = keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ outputs = self.distilbert( input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output, training=training) logits = self.classifier(sequence_output) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFTokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "distilbert", None) is not None: with tf.name_scope(self.distilbert.name): self.distilbert.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) @add_start_docstrings( """ DistilBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. 
""", DISTILBERT_START_DOCSTRING, ) class TFDistilBertForMultipleChoice(TFDistilBertPreTrainedModel, TFMultipleChoiceLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.distilbert = TFDistilBertMainLayer(config, name="distilbert") self.dropout = keras.layers.Dropout(config.seq_classif_dropout) self.pre_classifier = keras.layers.Dense( config.dim, kernel_initializer=get_initializer(config.initializer_range), activation="relu", name="pre_classifier", ) self.classifier = keras.layers.Dense( 1, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward( DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) """ if input_ids is not None: num_choices = shape_list(input_ids)[1] seq_length = shape_list(input_ids)[2] else: num_choices = shape_list(inputs_embeds)[1] seq_length = shape_list(inputs_embeds)[2] flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None flat_inputs_embeds = ( tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3])) if inputs_embeds is not None else None ) distilbert_output = self.distilbert( flat_input_ids, flat_attention_mask, head_mask, flat_inputs_embeds, output_attentions, output_hidden_states, return_dict=return_dict, training=training, ) hidden_state = distilbert_output[0] # (bs, seq_len, dim) pooled_output = hidden_state[:, 0] # (bs, dim) pooled_output = self.pre_classifier(pooled_output) # (bs, dim) pooled_output = self.dropout(pooled_output, training=training) # (bs, dim) logits = self.classifier(pooled_output) reshaped_logits = tf.reshape(logits, (-1, num_choices)) loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits) if not return_dict: output = (reshaped_logits,) + distilbert_output[1:] return ((loss,) + output) if loss is not None else output return TFMultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=distilbert_output.hidden_states, attentions=distilbert_output.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "distilbert", None) is not None: with tf.name_scope(self.distilbert.name): self.distilbert.build(None) if getattr(self, "pre_classifier", None) is not None: with tf.name_scope(self.pre_classifier.name): self.pre_classifier.build([None, None, self.config.dim]) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): 
self.classifier.build([None, None, self.config.dim]) @add_start_docstrings( """ DistilBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`). """, DISTILBERT_START_DOCSTRING, ) class TFDistilBertForQuestionAnswering(TFDistilBertPreTrainedModel, TFQuestionAnsweringLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.distilbert = TFDistilBertMainLayer(config, name="distilbert") self.qa_outputs = keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs" ) assert config.num_labels == 2, f"Incorrect number of labels {config.num_labels} instead of 2" self.dropout = keras.layers.Dropout(config.qa_dropout) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, start_positions: np.ndarray | tf.Tensor | None = None, end_positions: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]: r""" start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" distilbert_output = self.distilbert( input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) hidden_states = distilbert_output[0] # (bs, max_query_len, dim) hidden_states = self.dropout(hidden_states, training=training) # (bs, max_query_len, dim) logits = self.qa_outputs(hidden_states) # (bs, max_query_len, 2) start_logits, end_logits = tf.split(logits, 2, axis=-1) start_logits = tf.squeeze(start_logits, axis=-1) end_logits = tf.squeeze(end_logits, axis=-1) loss = None if start_positions is not None and end_positions is not None: labels = {"start_position": start_positions} labels["end_position"] = end_positions loss = self.hf_compute_loss(labels, (start_logits, end_logits)) if not return_dict: output = (start_logits, end_logits) + distilbert_output[1:] return ((loss,) + output) if loss is not None else output return TFQuestionAnsweringModelOutput( loss=loss, start_logits=start_logits, end_logits=end_logits, hidden_states=distilbert_output.hidden_states, attentions=distilbert_output.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "distilbert", None) is not None: with tf.name_scope(self.distilbert.name): self.distilbert.build(None) if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.dim])
transformers/src/transformers/models/distilbert/modeling_tf_distilbert.py/0
{ "file_path": "transformers/src/transformers/models/distilbert/modeling_tf_distilbert.py", "repo_id": "transformers", "token_count": 21218 }
334
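A minimal usage sketch for the multiple-choice head defined in the file above (not part of the original file): it shows how inputs shaped (batch_size, num_choices, sequence_length) are prepared and that the returned logits carry one score per choice. The checkpoint name "distilbert-base-uncased" and the toy prompt/choices are illustrative assumptions.

import tensorflow as tf
from transformers import AutoTokenizer, TFDistilBertForMultipleChoice

# Illustrative checkpoint; any DistilBERT checkpoint with a compatible vocab would do.
tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
model = TFDistilBertForMultipleChoice.from_pretrained("distilbert-base-uncased")

prompt = "The capital of France is"
choices = ["Paris.", "a large number."]

# Encode one (prompt, choice) pair per choice, then add the batch dimension so every
# tensor becomes (batch_size=1, num_choices=2, sequence_length), as the model expects.
encoding = tokenizer([prompt, prompt], choices, return_tensors="tf", padding=True)
inputs = {k: tf.expand_dims(v, 0) for k, v in encoding.items()}

outputs = model(inputs)
print(outputs.logits.shape)  # (1, 2) -> one relevance score per choice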
# coding=utf-8 # Copyright 2018 DPR Authors, The Hugging Face Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TensorFlow DPR model for Open Domain Question Answering.""" from __future__ import annotations from dataclasses import dataclass from typing import Tuple, Union import tensorflow as tf from ...modeling_tf_outputs import TFBaseModelOutputWithPooling from ...modeling_tf_utils import TFModelInputType, TFPreTrainedModel, get_initializer, keras, shape_list, unpack_inputs from ...utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ..bert.modeling_tf_bert import TFBertMainLayer from .configuration_dpr import DPRConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "DPRConfig" TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = [ "facebook/dpr-ctx_encoder-single-nq-base", "facebook/dpr-ctx_encoder-multiset-base", ] TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = [ "facebook/dpr-question_encoder-single-nq-base", "facebook/dpr-question_encoder-multiset-base", ] TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST = [ "facebook/dpr-reader-single-nq-base", "facebook/dpr-reader-multiset-base", ] ########## # Outputs ########## @dataclass class TFDPRContextEncoderOutput(ModelOutput): r""" Class for outputs of [`TFDPRContextEncoder`]. Args: pooler_output (`tf.Tensor` of shape `(batch_size, embeddings_size)`): The DPR encoder outputs the *pooler_output* that corresponds to the context representation. Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer. This output is to be used to embed contexts for nearest neighbors queries with questions embeddings. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ pooler_output: tf.Tensor = None hidden_states: Tuple[tf.Tensor, ...] | None = None attentions: Tuple[tf.Tensor, ...] | None = None @dataclass class TFDPRQuestionEncoderOutput(ModelOutput): """ Class for outputs of [`TFDPRQuestionEncoder`]. Args: pooler_output (`tf.Tensor` of shape `(batch_size, embeddings_size)`): The DPR encoder outputs the *pooler_output* that corresponds to the question representation. Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer. 
This output is to be used to embed questions for nearest neighbors queries with context embeddings. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ pooler_output: tf.Tensor = None hidden_states: Tuple[tf.Tensor, ...] | None = None attentions: Tuple[tf.Tensor, ...] | None = None @dataclass class TFDPRReaderOutput(ModelOutput): """ Class for outputs of [`TFDPRReaderEncoder`]. Args: start_logits (`tf.Tensor` of shape `(n_passages, sequence_length)`): Logits of the start index of the span for each passage. end_logits (`tf.Tensor` of shape `(n_passages, sequence_length)`): Logits of the end index of the span for each passage. relevance_logits (`tf.Tensor` of shape `(n_passages, )`): Outputs of the QA classifier of the DPRReader that corresponds to the scores of each passage to answer the question, compared to all the other passages. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ start_logits: tf.Tensor = None end_logits: tf.Tensor = None relevance_logits: tf.Tensor = None hidden_states: Tuple[tf.Tensor, ...] | None = None attentions: Tuple[tf.Tensor, ...] 
| None = None class TFDPREncoderLayer(keras.layers.Layer): base_model_prefix = "bert_model" def __init__(self, config: DPRConfig, **kwargs): super().__init__(**kwargs) # resolve name conflict with TFBertMainLayer instead of TFBertModel self.bert_model = TFBertMainLayer(config, add_pooling_layer=False, name="bert_model") self.config = config if self.config.hidden_size <= 0: raise ValueError("Encoder hidden_size can't be zero") self.projection_dim = config.projection_dim if self.projection_dim > 0: self.encode_proj = keras.layers.Dense( config.projection_dim, kernel_initializer=get_initializer(config.initializer_range), name="encode_proj" ) @unpack_inputs def call( self, input_ids: tf.Tensor = None, attention_mask: tf.Tensor | None = None, token_type_ids: tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, output_attentions: bool = None, output_hidden_states: bool = None, return_dict: bool = None, training: bool = False, ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor, ...]]: outputs = self.bert_model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] pooled_output = sequence_output[:, 0, :] if self.projection_dim > 0: pooled_output = self.encode_proj(pooled_output) if not return_dict: return (sequence_output, pooled_output) + outputs[1:] return TFBaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @property def embeddings_size(self) -> int: if self.projection_dim > 0: return self.projection_dim return self.bert_model.config.hidden_size def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "bert_model", None) is not None: with tf.name_scope(self.bert_model.name): self.bert_model.build(None) if getattr(self, "encode_proj", None) is not None: with tf.name_scope(self.encode_proj.name): self.encode_proj.build(None) class TFDPRSpanPredictorLayer(keras.layers.Layer): base_model_prefix = "encoder" def __init__(self, config: DPRConfig, **kwargs): super().__init__(**kwargs) self.config = config self.encoder = TFDPREncoderLayer(config, name="encoder") self.qa_outputs = keras.layers.Dense( 2, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs" ) self.qa_classifier = keras.layers.Dense( 1, kernel_initializer=get_initializer(config.initializer_range), name="qa_classifier" ) @unpack_inputs def call( self, input_ids: tf.Tensor = None, attention_mask: tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = False, training: bool = False, ) -> Union[TFDPRReaderOutput, Tuple[tf.Tensor, ...]]: # notations: N - number of questions in a batch, M - number of passages per questions, L - sequence length n_passages, sequence_length = shape_list(input_ids) if input_ids is not None else shape_list(inputs_embeds)[:2] # feed encoder outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] # compute logits logits = self.qa_outputs(sequence_output) start_logits, end_logits = tf.split(logits, 2, axis=-1) start_logits = 
tf.squeeze(start_logits, axis=-1) end_logits = tf.squeeze(end_logits, axis=-1) relevance_logits = self.qa_classifier(sequence_output[:, 0, :]) # resize start_logits = tf.reshape(start_logits, [n_passages, sequence_length]) end_logits = tf.reshape(end_logits, [n_passages, sequence_length]) relevance_logits = tf.reshape(relevance_logits, [n_passages]) if not return_dict: return (start_logits, end_logits, relevance_logits) + outputs[2:] return TFDPRReaderOutput( start_logits=start_logits, end_logits=end_logits, relevance_logits=relevance_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.encoder.embeddings_size]) if getattr(self, "qa_classifier", None) is not None: with tf.name_scope(self.qa_classifier.name): self.qa_classifier.build([None, None, self.encoder.embeddings_size]) class TFDPRSpanPredictor(TFPreTrainedModel): base_model_prefix = "encoder" def __init__(self, config: DPRConfig, **kwargs): super().__init__(config, **kwargs) self.encoder = TFDPRSpanPredictorLayer(config) @unpack_inputs def call( self, input_ids: tf.Tensor = None, attention_mask: tf.Tensor | None = None, token_type_ids: tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = False, training: bool = False, ) -> Union[TFDPRReaderOutput, Tuple[tf.Tensor, ...]]: outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs class TFDPREncoder(TFPreTrainedModel): base_model_prefix = "encoder" def __init__(self, config: DPRConfig, **kwargs): super().__init__(config, **kwargs) self.encoder = TFDPREncoderLayer(config) @unpack_inputs def call( self, input_ids: tf.Tensor = None, attention_mask: tf.Tensor | None = None, token_type_ids: tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = False, training: bool = False, ) -> Union[TFDPRReaderOutput, Tuple[tf.Tensor, ...]]: outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs ################## # PreTrainedModel ################## class TFDPRPretrainedContextEncoder(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = DPRConfig base_model_prefix = "ctx_encoder" class TFDPRPretrainedQuestionEncoder(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = DPRConfig base_model_prefix = "question_encoder" class TFDPRPretrainedReader(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = DPRConfig base_model_prefix = "reader" ############### # Actual Models ############### TF_DPR_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a Tensorflow [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Parameters: config ([`DPRConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ TF_DPR_ENCODERS_INPUTS_DOCSTRING = r""" Args: input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. To match pretraining, DPR input sequence should be formatted with [CLS] and [SEP] tokens as follows: (a) For sequence pairs (for a pair title+text for example): ``` tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 ``` (b) For single sequences (for a question for example): ``` tokens: [CLS] the dog is hairy . [SEP] token_type_ids: 0 0 0 0 0 0 0 ``` DPR is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. 
Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) inputs_embeds (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ TF_DPR_READER_INPUTS_DOCSTRING = r""" Args: input_ids (`Numpy array` or `tf.Tensor` of shapes `(n_passages, sequence_length)`): Indices of input sequence tokens in the vocabulary. It has to be a sequence triplet with 1) the question and 2) the passages titles and 3) the passages texts To match pretraining, DPR `input_ids` sequence should be formatted with [CLS] and [SEP] with the format: `[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>` DPR is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. Indices can be obtained using [`DPRReaderTokenizer`]. See this class documentation for more details. attention_mask (`Numpy array` or `tf.Tensor` of shape `(n_passages, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) inputs_embeds (`Numpy array` or `tf.Tensor` of shape `(n_passages, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. 
return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ @add_start_docstrings( "The bare DPRContextEncoder transformer outputting pooler outputs as context representations.", TF_DPR_START_DOCSTRING, ) class TFDPRContextEncoder(TFDPRPretrainedContextEncoder): def __init__(self, config: DPRConfig, *args, **kwargs): super().__init__(config, *args, **kwargs) self.ctx_encoder = TFDPREncoderLayer(config, name="ctx_encoder") def get_input_embeddings(self): try: return self.ctx_encoder.bert_model.get_input_embeddings() except AttributeError: self.build() return self.ctx_encoder.bert_model.get_input_embeddings() @unpack_inputs @add_start_docstrings_to_model_forward(TF_DPR_ENCODERS_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFDPRContextEncoderOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: TFModelInputType | None = None, attention_mask: tf.Tensor | None = None, token_type_ids: tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> TFDPRContextEncoderOutput | Tuple[tf.Tensor, ...]: r""" Return: Examples: ```python >>> from transformers import TFDPRContextEncoder, DPRContextEncoderTokenizer >>> tokenizer = DPRContextEncoderTokenizer.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base") >>> model = TFDPRContextEncoder.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base", from_pt=True) >>> input_ids = tokenizer("Hello, is my dog cute ?", return_tensors="tf")["input_ids"] >>> embeddings = model(input_ids).pooler_output ``` """ if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if attention_mask is None: attention_mask = ( tf.ones(input_shape, dtype=tf.dtypes.int32) if input_ids is None else (input_ids != self.config.pad_token_id) ) if token_type_ids is None: token_type_ids = tf.zeros(input_shape, dtype=tf.dtypes.int32) outputs = self.ctx_encoder( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) if not return_dict: return outputs[1:] return TFDPRContextEncoderOutput( pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "ctx_encoder", None) is not None: with tf.name_scope(self.ctx_encoder.name): self.ctx_encoder.build(None) @add_start_docstrings( "The bare DPRQuestionEncoder transformer outputting pooler outputs as question representations.", TF_DPR_START_DOCSTRING, ) class TFDPRQuestionEncoder(TFDPRPretrainedQuestionEncoder): def __init__(self, config: DPRConfig, *args, **kwargs): super().__init__(config, *args, **kwargs) 
self.question_encoder = TFDPREncoderLayer(config, name="question_encoder") def get_input_embeddings(self): try: return self.question_encoder.bert_model.get_input_embeddings() except AttributeError: self.build() return self.question_encoder.bert_model.get_input_embeddings() @unpack_inputs @add_start_docstrings_to_model_forward(TF_DPR_ENCODERS_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFDPRQuestionEncoderOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: TFModelInputType | None = None, attention_mask: tf.Tensor | None = None, token_type_ids: tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> TFDPRQuestionEncoderOutput | Tuple[tf.Tensor, ...]: r""" Return: Examples: ```python >>> from transformers import TFDPRQuestionEncoder, DPRQuestionEncoderTokenizer >>> tokenizer = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base") >>> model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base", from_pt=True) >>> input_ids = tokenizer("Hello, is my dog cute ?", return_tensors="tf")["input_ids"] >>> embeddings = model(input_ids).pooler_output ``` """ if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if attention_mask is None: attention_mask = ( tf.ones(input_shape, dtype=tf.dtypes.int32) if input_ids is None else (input_ids != self.config.pad_token_id) ) if token_type_ids is None: token_type_ids = tf.zeros(input_shape, dtype=tf.dtypes.int32) outputs = self.question_encoder( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) if not return_dict: return outputs[1:] return TFDPRQuestionEncoderOutput( pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "question_encoder", None) is not None: with tf.name_scope(self.question_encoder.name): self.question_encoder.build(None) @add_start_docstrings( "The bare DPRReader transformer outputting span predictions.", TF_DPR_START_DOCSTRING, ) class TFDPRReader(TFDPRPretrainedReader): def __init__(self, config: DPRConfig, *args, **kwargs): super().__init__(config, *args, **kwargs) self.span_predictor = TFDPRSpanPredictorLayer(config, name="span_predictor") def get_input_embeddings(self): try: return self.span_predictor.encoder.bert_model.get_input_embeddings() except AttributeError: self.build() return self.span_predictor.encoder.bert_model.get_input_embeddings() @unpack_inputs @add_start_docstrings_to_model_forward(TF_DPR_READER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFDPRReaderOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: TFModelInputType | None = None, attention_mask: tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, 
training: bool = False, ) -> TFDPRReaderOutput | Tuple[tf.Tensor, ...]: r""" Return: Examples: ```python >>> from transformers import TFDPRReader, DPRReaderTokenizer >>> tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base") >>> model = TFDPRReader.from_pretrained("facebook/dpr-reader-single-nq-base", from_pt=True) >>> encoded_inputs = tokenizer( ... questions=["What is love ?"], ... titles=["Haddaway"], ... texts=["'What Is Love' is a song recorded by the artist Haddaway"], ... return_tensors="tf", ... ) >>> outputs = model(encoded_inputs) >>> start_logits = outputs.start_logits >>> end_logits = outputs.end_logits >>> relevance_logits = outputs.relevance_logits ``` """ if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if attention_mask is None: attention_mask = tf.ones(input_shape, dtype=tf.dtypes.int32) return self.span_predictor( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "span_predictor", None) is not None: with tf.name_scope(self.span_predictor.name): self.span_predictor.build(None)
transformers/src/transformers/models/dpr/modeling_tf_dpr.py/0
{ "file_path": "transformers/src/transformers/models/dpr/modeling_tf_dpr.py", "repo_id": "transformers", "token_count": 13909 }
335
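A retrieval sketch tying the DPR encoders above together (not part of the original file): a question and candidate passages are embedded separately and compared with a dot product over the pooler outputs. The checkpoint names come from the archive lists in the file, `from_pt=True` mirrors the docstring examples, and the toy question/passages are assumptions.

import tensorflow as tf
from transformers import (
    DPRContextEncoderTokenizer,
    DPRQuestionEncoderTokenizer,
    TFDPRContextEncoder,
    TFDPRQuestionEncoder,
)

q_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
q_encoder = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base", from_pt=True)
ctx_tokenizer = DPRContextEncoderTokenizer.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")
ctx_encoder = TFDPRContextEncoder.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base", from_pt=True)

question = "What is love ?"
passages = [
    "'What Is Love' is a song recorded by the artist Haddaway",
    "TensorFlow is an open-source machine learning framework",
]

# Each encoder returns a pooler_output of shape (batch, embedding_size).
q_emb = q_encoder(**q_tokenizer(question, return_tensors="tf")).pooler_output
ctx_emb = ctx_encoder(**ctx_tokenizer(passages, return_tensors="tf", padding=True)).pooler_output

scores = tf.matmul(q_emb, ctx_emb, transpose_b=True)  # (1, 2); higher score = more relevant passage
print(scores.numpy())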
# coding=utf-8 # Copyright 2023 Xuan Ouyang, Shuohuan Wang, Chao Pang, Yu Sun, Hao Tian, Hua Wu, Haifeng Wang and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for Ernie-M.""" import io import os import unicodedata from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) SPIECE_UNDERLINE = "▁" VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"} RESOURCE_FILES_NAMES = { "sentencepiece_model_file": "sentencepiece.bpe.model", "vocab_file": "vocab.txt", } PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt", "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt", }, "sentencepiece_model_file": { "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model", "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "ernie-m-base": 514, "ernie-m-large": 514, } PRETRAINED_INIT_CONFIGURATION = { "ernie-m-base": {"do_lower_case": False}, "ernie-m-large": {"do_lower_case": False}, } # Adapted from paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer class ErnieMTokenizer(PreTrainedTokenizer): r""" Constructs a Ernie-M tokenizer. It uses the `sentencepiece` tools to cut the words to sub-words. Args: sentencepiece_model_file (`str`): The file path of sentencepiece model. vocab_file (`str`, *optional*): The file path of the vocabulary. do_lower_case (`str`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. unk_token (`str`, *optional*, defaults to `"[UNK]"`): A special token representing the `unknown (out-of-vocabulary)` token. An unknown token is set to be `unk_token` inorder to be converted to an ID. sep_token (`str`, *optional*, defaults to `"[SEP]"`): A special token separating two different sentences in the same input. pad_token (`str`, *optional*, defaults to `"[PAD]"`): A special token used to make arrays of tokens the same size for batching purposes. cls_token (`str`, *optional*, defaults to `"[CLS]"`): A special token used for sequence classification. It is the last token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): A special token representing a masked token. This is the token used in the masked language modeling task which the model tries to predict the original unmasked ones. """ # Ernie-M model doesn't have token_type embedding. 
model_input_names: List[str] = ["input_ids"] vocab_files_names = VOCAB_FILES_NAMES pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP resource_files_names = RESOURCE_FILES_NAMES def __init__( self, sentencepiece_model_ckpt, vocab_file=None, do_lower_case=False, encoding="utf8", unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ) -> None: # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs self.do_lower_case = do_lower_case self.sentencepiece_model_ckpt = sentencepiece_model_ckpt self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(sentencepiece_model_ckpt) # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning if vocab_file is not None: self.vocab = self.load_vocab(filepath=vocab_file) else: self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())} self.reverse_vocab = {v: k for k, v in self.vocab.items()} super().__init__( do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, vocab_file=vocab_file, encoding=encoding, sp_model_kwargs=self.sp_model_kwargs, **kwargs, ) def get_offset_mapping(self, text): if text is None: return None split_tokens = self.tokenize(text) normalized_text, char_mapping = "", [] for i, ch in enumerate(text): if ch in self.SP_CHAR_MAPPING: ch = self.SP_CHAR_MAPPING.get(ch) else: ch = unicodedata.normalize("NFKC", ch) if self.is_whitespace(ch): continue normalized_text += ch char_mapping.extend([i] * len(ch)) text, token_mapping, offset = normalized_text, [], 0 if self.do_lower_case: text = text.lower() for token in split_tokens: if token[:1] == "▁": token = token[1:] start = text[offset:].index(token) + offset end = start + len(token) token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1)) offset = end return token_mapping @property def vocab_size(self): return len(self.vocab) def get_vocab(self): return dict(self.vocab, **self.added_tokens_encoder) def __getstate__(self): state = self.__dict__.copy() state["sp_model"] = None return state def __setstate__(self, d): self.__dict__ = d # for backward compatibility if not hasattr(self, "sp_model_kwargs"): self.sp_model_kwargs = {} self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.sentencepiece_model_ckpt) def clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text)) def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1): """Tokenize a string.""" if self.sp_model_kwargs.get("enable_sampling") is True: enable_sampling = True if self.sp_model_kwargs.get("alpha") is not None: alpha = self.sp_model_kwargs.get("alpha") if self.sp_model_kwargs.get("nbest_size") is not None: nbest_size = self.sp_model_kwargs.get("nbest_size") if not enable_sampling: pieces = self.sp_model.EncodeAsPieces(text) else: pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha) new_pieces = [] for pi, piece in enumerate(pieces): if piece == SPIECE_UNDERLINE: if not pieces[pi + 
1].startswith(SPIECE_UNDERLINE) and pi != 0: new_pieces.append(SPIECE_UNDERLINE) continue else: continue lst_i = 0 for i, chunk in enumerate(piece): if chunk == SPIECE_UNDERLINE: continue if self.is_ch_char(chunk) or self.is_punct(chunk): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i]) new_pieces.append(chunk) lst_i = i + 1 elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i]) lst_i = i elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i]) lst_i = i if len(piece) > lst_i: new_pieces.append(piece[lst_i:]) return new_pieces def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (strings for sub-words) in a single string.""" out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip() return out_string def convert_ids_to_string(self, ids): """ Converts a sequence of tokens (strings for sub-words) in a single string. """ tokens = self.convert_ids_to_tokens(ids) out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip() return out_string # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning def _convert_token_to_id(self, token): return self.vocab.get(token, self.vocab.get(self.unk_token)) # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.reverse_vocab.get(index, self.unk_token) def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): r""" Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An ErnieM sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of input_id with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] _cls = [self.cls_token_id] _sep = [self.sep_token_id] return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None): r""" Build offset map from a pair of offset map by concatenating and adding offsets of special tokens. An Ernie-M offset_mapping has the following format: - single sequence: `(0,0) X (0,0)` - pair of sequences: `(0,0) A (0,0) (0,0) B (0,0)` Args: offset_mapping_ids_0 (`List[tuple]`): List of char offsets to which the special tokens will be added. offset_mapping_ids_1 (`List[tuple]`, *optional*): Optional second list of wordpiece offsets for offset mapping pairs. Returns: `List[tuple]`: List of wordpiece offsets with the appropriate offsets of special tokens. """ if offset_mapping_1 is None: return [(0, 0)] + offset_mapping_0 + [(0, 0)] return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)] def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False): r""" Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `encode` method. 
Args: token_ids_0 (`List[int]`): List of ids of the first sequence. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`str`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: The list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: if token_ids_1 is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0] if token_ids_1 is not None: return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create the token type IDs corresponding to the sequences passed. [What are token type IDs?](../glossary#token-type-ids) Should be overridden in a subclass if the model has a special way of building: those. Args: token_ids_0 (`List[int]`): The first tokenized sequence. token_ids_1 (`List[int]`, *optional*): The second tokenized sequence. Returns: `List[int]`: The token type ids. """ # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method if token_ids_1 is None: # [CLS] X [SEP] return (len(token_ids_0) + 2) * [0] # [CLS] A [SEP] [SEP] B [SEP] return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3) def is_ch_char(self, char): """ is_ch_char """ if "\u4e00" <= char <= "\u9fff": return True return False def is_alpha(self, char): """ is_alpha """ if ("a" <= char <= "z") or ("A" <= char <= "Z"): return True return False def is_punct(self, char): """ is_punct """ if char in ",;:.?!~,;:。?!《》【】": return True return False def is_whitespace(self, char): """ is whitespace """ if char == " " or char == "\t" or char == "\n" or char == "\r": return True if len(char) == 1: cat = unicodedata.category(char) if cat == "Zs": return True return False def load_vocab(self, filepath): token_to_idx = {} with io.open(filepath, "r", encoding="utf-8") as f: for index, line in enumerate(f): token = line.rstrip("\n") token_to_idx[token] = int(index) return token_to_idx def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: index = 0 if os.path.isdir(save_directory): vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory with open(vocab_file, "w", encoding="utf-8") as writer: for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." " Please check that the vocabulary is not corrupted!" ) index = token_index writer.write(token + "\n") index += 1 tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model") with open(tokenizer_model_file, "wb") as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (vocab_file,)
transformers/src/transformers/models/ernie_m/tokenization_ernie_m.py/0
{ "file_path": "transformers/src/transformers/models/ernie_m/tokenization_ernie_m.py", "repo_id": "transformers", "token_count": 7974 }
336
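A small illustration of the special-token layout implemented by `build_inputs_with_special_tokens` and `create_token_type_ids_from_sequences` in the tokenizer above (not part of the original file). The ids used here are hypothetical stand-ins, not real sentencepiece vocabulary indices.

# Hypothetical ids: in a real ErnieMTokenizer these come from the sentencepiece vocab.
CLS, SEP = 0, 2
seq_a = [11, 12, 13]  # tokenized sentence A
seq_b = [21, 22]      # tokenized sentence B

# Pair layout: [CLS] A [SEP] [SEP] B [SEP]
pair_ids = [CLS] + seq_a + [SEP] + [SEP] + seq_b + [SEP]

# Token type ids: 0 over "[CLS] A" (len(A) + 1 zeros), 1 over "[SEP] [SEP] B [SEP]" (len(B) + 3 ones)
token_type_ids = [0] * (len(seq_a) + 1) + [1] * (len(seq_b) + 3)

print(pair_ids)        # [0, 11, 12, 13, 2, 2, 21, 22, 2]
print(token_type_ids)  # [0, 0, 0, 0, 1, 1, 1, 1, 1]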
# coding=utf-8 # Copyright 2022 Meta and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for ESM.""" import os from typing import List, Optional from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt", "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "facebook/esm2_t6_8M_UR50D": 1024, "facebook/esm2_t12_35M_UR50D": 1024, } def load_vocab_file(vocab_file): with open(vocab_file, "r") as f: lines = f.read().splitlines() return [l.strip() for l in lines] class EsmTokenizer(PreTrainedTokenizer): """ Constructs an ESM tokenizer. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file, unk_token="<unk>", cls_token="<cls>", pad_token="<pad>", mask_token="<mask>", eos_token="<eos>", **kwargs, ): self.all_tokens = load_vocab_file(vocab_file) self._id_to_token = dict(enumerate(self.all_tokens)) self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)} super().__init__( unk_token=unk_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, eos_token=eos_token, **kwargs, ) # TODO, all the tokens are added? But they are also part of the vocab... bit strange. # none of them are special, but they all need special splitting. 
self.unique_no_split_tokens = self.all_tokens self._update_trie(self.unique_no_split_tokens) def _convert_id_to_token(self, index: int) -> str: return self._id_to_token.get(index, self.unk_token) def _convert_token_to_id(self, token: str) -> int: return self._token_to_id.get(token, self._token_to_id.get(self.unk_token)) def _tokenize(self, text, **kwargs): return text.split() def get_vocab(self): base_vocab = self._token_to_id.copy() base_vocab.update(self.added_tokens_encoder) return base_vocab def token_to_id(self, token: str) -> int: return self._token_to_id.get(token, self._token_to_id.get(self.unk_token)) def id_to_token(self, index: int) -> str: return self._id_to_token.get(index, self.unk_token) def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: cls = [self.cls_token_id] sep = [self.eos_token_id] # No sep token in ESM vocabulary if token_ids_1 is None: if self.eos_token_id is None: return cls + token_ids_0 else: return cls + token_ids_0 + sep elif self.eos_token_id is None: raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!") return cls + token_ids_0 + sep + token_ids_1 + sep # Multiple inputs always have an EOS token def get_special_tokens_mask( self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods. Args: token_ids_0 (`List[int]`): List of ids of the first sequence. token_ids_1 (`List[int]`, *optional*): List of ids of the second sequence. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: if token_ids_1 is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." ) return [1 if token in self.all_special_ids else 0 for token in token_ids_0] mask = [1] + ([0] * len(token_ids_0)) + [1] if token_ids_1 is not None: mask += [0] * len(token_ids_1) + [1] return mask def save_vocabulary(self, save_directory, filename_prefix): vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt") with open(vocab_file, "w") as f: f.write("\n".join(self.all_tokens)) return (vocab_file,) @property def vocab_size(self) -> int: return len(self.all_tokens)
transformers/src/transformers/models/esm/tokenization_esm.py/0
{ "file_path": "transformers/src/transformers/models/esm/tokenization_esm.py", "repo_id": "transformers", "token_count": 2598 }
337
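A self-contained sketch of how `EsmTokenizer` above consumes its plain-text vocabulary (not part of the original file). The tiny vocabulary and the toy protein fragment are assumptions; real checkpoints such as facebook/esm2_t6_8M_UR50D ship a full amino-acid vocabulary.

import os
import tempfile

from transformers import EsmTokenizer

# One token per line, as load_vocab_file expects; line order determines the ids.
toy_vocab = ["<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "<mask>"]

with tempfile.TemporaryDirectory() as tmp_dir:
    vocab_file = os.path.join(tmp_dir, "vocab.txt")
    with open(vocab_file, "w") as f:
        f.write("\n".join(toy_vocab))

    tokenizer = EsmTokenizer(vocab_file=vocab_file)
    encoded = tokenizer("LAGV")
    # Expected: [0, 4, 5, 6, 7, 2] -> <cls> L A G V <eos>
    print(encoded["input_ids"])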
# coding=utf-8 # Copyright 2019-present CNRS, Facebook Inc. and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for Flaubert.""" import json import os import re import unicodedata from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = { "vocab_file": "vocab.json", "merges_file": "merges.txt", } PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "flaubert/flaubert_small_cased": ( "https://huggingface.co/flaubert/flaubert_small_cased/resolve/main/vocab.json" ), "flaubert/flaubert_base_uncased": ( "https://huggingface.co/flaubert/flaubert_base_uncased/resolve/main/vocab.json" ), "flaubert/flaubert_base_cased": "https://huggingface.co/flaubert/flaubert_base_cased/resolve/main/vocab.json", "flaubert/flaubert_large_cased": ( "https://huggingface.co/flaubert/flaubert_large_cased/resolve/main/vocab.json" ), }, "merges_file": { "flaubert/flaubert_small_cased": ( "https://huggingface.co/flaubert/flaubert_small_cased/resolve/main/merges.txt" ), "flaubert/flaubert_base_uncased": ( "https://huggingface.co/flaubert/flaubert_base_uncased/resolve/main/merges.txt" ), "flaubert/flaubert_base_cased": "https://huggingface.co/flaubert/flaubert_base_cased/resolve/main/merges.txt", "flaubert/flaubert_large_cased": ( "https://huggingface.co/flaubert/flaubert_large_cased/resolve/main/merges.txt" ), }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "flaubert/flaubert_small_cased": 512, "flaubert/flaubert_base_uncased": 512, "flaubert/flaubert_base_cased": 512, "flaubert/flaubert_large_cased": 512, } PRETRAINED_INIT_CONFIGURATION = { "flaubert/flaubert_small_cased": {"do_lowercase": False}, "flaubert/flaubert_base_uncased": {"do_lowercase": True}, "flaubert/flaubert_base_cased": {"do_lowercase": False}, "flaubert/flaubert_large_cased": {"do_lowercase": False}, } def convert_to_unicode(text): """ Converts `text` to Unicode (if it's not already), assuming UTF-8 input. """ def ensure_text(s, encoding="utf-8", errors="strict"): if isinstance(s, bytes): return s.decode(encoding, errors) elif isinstance(s, str): return s else: raise TypeError(f"not expecting type '{type(s)}'") return ensure_text(text, encoding="utf-8", errors="ignore") # Copied from transformers.models.xlm.tokenization_xlm.get_pairs def get_pairs(word): """ Return set of symbol pairs in a word. word is represented as tuple of symbols (symbols being variable-length strings) """ pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char return pairs # Copied from transformers.models.xlm.tokenization_xlm.replace_unicode_punct def replace_unicode_punct(text): """ Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/replace-unicode-punctuation.perl """ text = text.replace(",", ",") text = re.sub(r"。\s*", ". 
", text) text = text.replace("、", ",") text = text.replace("”", '"') text = text.replace("“", '"') text = text.replace("∶", ":") text = text.replace(":", ":") text = text.replace("?", "?") text = text.replace("《", '"') text = text.replace("》", '"') text = text.replace(")", ")") text = text.replace("!", "!") text = text.replace("(", "(") text = text.replace(";", ";") text = text.replace("1", "1") text = text.replace("」", '"') text = text.replace("「", '"') text = text.replace("0", "0") text = text.replace("3", "3") text = text.replace("2", "2") text = text.replace("5", "5") text = text.replace("6", "6") text = text.replace("9", "9") text = text.replace("7", "7") text = text.replace("8", "8") text = text.replace("4", "4") text = re.sub(r".\s*", ". ", text) text = text.replace("~", "~") text = text.replace("’", "'") text = text.replace("…", "...") text = text.replace("━", "-") text = text.replace("〈", "<") text = text.replace("〉", ">") text = text.replace("【", "[") text = text.replace("】", "]") text = text.replace("%", "%") return text # Copied from transformers.models.xlm.tokenization_xlm.remove_non_printing_char def remove_non_printing_char(text): """ Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/remove-non-printing-char.perl """ output = [] for char in text: cat = unicodedata.category(char) if cat.startswith("C"): continue output.append(char) return "".join(output) class FlaubertTokenizer(PreTrainedTokenizer): """ Construct a Flaubert tokenizer. Based on Byte-Pair Encoding. The tokenization process is the following: - Moses preprocessing and tokenization. - Normalizing all inputs text. - The arguments `special_tokens` and the function `set_special_tokens`, can be used to add additional symbols (like "__classify__") to a vocabulary. - The argument `do_lowercase` controls lower casing (automatically set for pretrained vocabularies). This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Vocabulary file. merges_file (`str`): Merges file. do_lowercase (`bool`, *optional*, defaults to `False`): Controls lower casing. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"</s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"<special1>"`): The token used for masking values. 
This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. additional_special_tokens (`List[str]`, *optional*, defaults to `['<special0>', '<special1>', '<special2>', '<special3>', '<special4>', '<special5>', '<special6>', '<special7>', '<special8>', '<special9>']`): List of additional special tokens. lang2id (`Dict[str, int]`, *optional*): Dictionary mapping languages string identifiers to their IDs. id2lang (`Dict[int, str]`, *optional*): Dictionary mapping language IDs to their string identifiers. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self, vocab_file, merges_file, do_lowercase=False, unk_token="<unk>", bos_token="<s>", sep_token="</s>", pad_token="<pad>", cls_token="</s>", mask_token="<special1>", additional_special_tokens=[ "<special0>", "<special1>", "<special2>", "<special3>", "<special4>", "<special5>", "<special6>", "<special7>", "<special8>", "<special9>", ], lang2id=None, id2lang=None, **kwargs, ): do_lowercase_and_remove_accent = kwargs.pop("do_lowercase_and_remove_accent", None) if do_lowercase_and_remove_accent is not None: logger.warning( "`do_lowercase_and_remove_accent` is passed as a keyword argument, but this won't do anything." " `FlaubertTokenizer` will always set it to `False`." ) # always `False` self.do_lowercase_and_remove_accent = False self.do_lowercase = do_lowercase try: import sacremoses except ImportError: raise ImportError( "You need to install sacremoses to use FlaubertTokenizer. " "See https://pypi.org/project/sacremoses/ for installation." ) self.sm = sacremoses # cache of sm.MosesPunctNormalizer instance self.cache_moses_punct_normalizer = {} # cache of sm.MosesTokenizer instance self.cache_moses_tokenizer = {} self.lang_with_custom_tokenizer = {"zh", "th", "ja"} self.lang2id = lang2id self.id2lang = id2lang if lang2id is not None and id2lang is not None: assert len(lang2id) == len(id2lang) self.ja_word_tokenizer = None self.zh_word_tokenizer = None with open(vocab_file, encoding="utf-8") as vocab_handle: self.encoder = json.load(vocab_handle) self.decoder = {v: k for k, v in self.encoder.items()} with open(merges_file, encoding="utf-8") as merges_handle: merges = merges_handle.read().split("\n")[:-1] merges = [tuple(merge.split()[:2]) for merge in merges] self.bpe_ranks = dict(zip(merges, range(len(merges)))) self.cache = {} super().__init__( unk_token=unk_token, bos_token=bos_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, lang2id=lang2id, id2lang=id2lang, **kwargs, ) @property # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.do_lower_case def do_lower_case(self): return self.do_lowercase_and_remove_accent # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.moses_punct_norm def moses_punct_norm(self, text, lang): if lang not in self.cache_moses_punct_normalizer: punct_normalizer = self.sm.MosesPunctNormalizer(lang=lang) self.cache_moses_punct_normalizer[lang] = punct_normalizer else: punct_normalizer = self.cache_moses_punct_normalizer[lang] return punct_normalizer.normalize(text) # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.moses_tokenize def moses_tokenize(self, text, lang): if lang not in self.cache_moses_tokenizer: 
moses_tokenizer = self.sm.MosesTokenizer(lang=lang) self.cache_moses_tokenizer[lang] = moses_tokenizer else: moses_tokenizer = self.cache_moses_tokenizer[lang] return moses_tokenizer.tokenize(text, return_str=False, escape=False) # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.moses_pipeline def moses_pipeline(self, text, lang): text = replace_unicode_punct(text) text = self.moses_punct_norm(text, lang) text = remove_non_printing_char(text) return text # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.ja_tokenize def ja_tokenize(self, text): if self.ja_word_tokenizer is None: try: import Mykytea self.ja_word_tokenizer = Mykytea.Mykytea( f"-model {os.path.expanduser('~')}/local/share/kytea/model.bin" ) except (AttributeError, ImportError): logger.error( "Make sure you install KyTea (https://github.com/neubig/kytea) and it's python wrapper" " (https://github.com/chezou/Mykytea-python) with the following steps" ) logger.error("1. git clone [email protected]:neubig/kytea.git && cd kytea") logger.error("2. autoreconf -i") logger.error("3. ./configure --prefix=$HOME/local") logger.error("4. make && make install") logger.error("5. pip install kytea") raise return list(self.ja_word_tokenizer.getWS(text)) @property # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.vocab_size def vocab_size(self): return len(self.encoder) # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.get_vocab def get_vocab(self): return dict(self.encoder, **self.added_tokens_encoder) # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.bpe def bpe(self, token): word = tuple(token[:-1]) + (token[-1] + "</w>",) if token in self.cache: return self.cache[token] pairs = get_pairs(word) if not pairs: return token + "</w>" while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) i = j if word[i] == first and i < len(word) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = " ".join(word) if word == "\n </w>": word = "\n</w>" self.cache[token] = word return word def preprocess_text(self, text): text = text.replace("``", '"').replace("''", '"') text = convert_to_unicode(text) text = unicodedata.normalize("NFC", text) if self.do_lowercase: text = text.lower() return text def _tokenize(self, text, bypass_tokenizer=False): """ Tokenize a string given language code using Moses. Details of tokenization: - [sacremoses](https://github.com/alvations/sacremoses): port of Moses - Install with `pip install sacremoses` Args: - bypass_tokenizer: Allow users to preprocess and tokenize the sentences externally (default = False) (bool). If True, we only apply BPE. Returns: List of tokens. """ lang = "fr" if lang and self.lang2id and lang not in self.lang2id: logger.error( "Supplied language code not found in lang2id mapping. Please check that your language is supported by" " the loaded pretrained model." 
) if bypass_tokenizer: text = text.split() else: text = self.preprocess_text(text) text = self.moses_pipeline(text, lang=lang) text = self.moses_tokenize(text, lang=lang) split_tokens = [] for token in text: if token: split_tokens.extend(list(self.bpe(token).split(" "))) return split_tokens # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer._convert_token_to_id def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.encoder.get(token, self.encoder.get(self.unk_token)) # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer._convert_id_to_token def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.decoder.get(index, self.unk_token) # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.convert_tokens_to_string def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" out_string = "".join(tokens).replace("</w>", " ").strip() return out_string # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.build_inputs_with_special_tokens def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An XLM sequence has the following format: - single sequence: `<s> X </s>` - pair of sequences: `<s> A </s> B </s>` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ bos = [self.bos_token_id] sep = [self.sep_token_id] if token_ids_1 is None: return bos + token_ids_0 + sep return bos + token_ids_0 + sep + token_ids_1 + sep # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.get_special_tokens_mask def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is not None: return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.create_token_type_ids_from_sequences def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. 
An XLM sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.save_vocabulary def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) merge_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") index = 0 with open(merge_file, "w", encoding="utf-8") as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" ) index = token_index writer.write(" ".join(bpe_tokens) + "\n") index += 1 return vocab_file, merge_file # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.__getstate__ def __getstate__(self): state = self.__dict__.copy() state["sm"] = None return state # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.__setstate__ def __setstate__(self, d): self.__dict__ = d try: import sacremoses except ImportError: raise ImportError( "You need to install sacremoses to use XLMTokenizer. " "See https://pypi.org/project/sacremoses/ for installation." ) self.sm = sacremoses
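# Illustrative usage sketch: a minimal, hedged example of how the methods above
# compose -- Moses preprocessing, BPE with the "</w>" end-of-word marker, and the
# "<s> ... </s>" special-token layout. The checkpoint name
# "flaubert/flaubert_base_cased" and the sample sentences are assumptions chosen
# for illustration; running it needs `transformers` and `sacremoses` installed
# plus access to the vocabulary files, and the exact subword splits depend on the
# vocab/merges that are actually loaded.
if __name__ == "__main__":
    from transformers import FlaubertTokenizer

    tokenizer = FlaubertTokenizer.from_pretrained("flaubert/flaubert_base_cased")

    # _tokenize() chains preprocess_text -> moses_pipeline -> moses_tokenize -> bpe,
    # so subword pieces may end with the "</w>" marker appended by bpe().
    tokens = tokenizer.tokenize("Bonjour, comment allez-vous ?")
    ids = tokenizer.convert_tokens_to_ids(tokens)
    pair_ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("Merci beaucoup."))

    # build_inputs_with_special_tokens() wraps one sequence as "<s> X </s>" and a
    # pair as "<s> A </s> B </s>".
    input_ids = tokenizer.build_inputs_with_special_tokens(ids, pair_ids)

    # create_token_type_ids_from_sequences() labels the first segment (plus its
    # leading and trailing special tokens) with 0 and the second segment with 1;
    # get_special_tokens_mask() marks the three special-token positions with 1.
    token_type_ids = tokenizer.create_token_type_ids_from_sequences(ids, pair_ids)
    special_mask = tokenizer.get_special_tokens_mask(ids, pair_ids)

    # convert_tokens_to_string() strips the "</w>" markers and rejoins the words.
    print(tokens)
    print(input_ids)
    print(token_type_ids)
    print(special_mask)
    print(tokenizer.convert_tokens_to_string(tokens))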
transformers/src/transformers/models/flaubert/tokenization_flaubert.py/0
{ "file_path": "transformers/src/transformers/models/flaubert/tokenization_flaubert.py", "repo_id": "transformers", "token_count": 10882 }
338