Columns: text (string, length 7–328k) · id (string, length 14–166) · metadata (dict) · __index_level_0__ (int64, 0–459)
from argparse import ArgumentParser from json import dump from logging import basicConfig, getLogger from os import linesep, remove from os.path import exists from tempfile import NamedTemporaryFile from typing import Dict, List, Tuple from requests import get from sentencepiece import SentencePieceProcessor from tqdm import trange, tqdm basicConfig() logger = getLogger() class SentencePieceExtractor: """ Extractor implementation for SentencePiece trained models. https://github.com/google/sentencepiece """ def __init__(self, model: str): # Get SentencePiece self.sp = SentencePieceProcessor() self.sp.Load(model) def extract(self) -> Tuple[Dict[str, int], List[Tuple]]: sp = self.sp vocab = {sp.id_to_piece(index): index for index in trange(sp.GetPieceSize())} # Merges merges = [] for piece_l in tqdm(vocab.keys(), total=sp.GetPieceSize()): for piece_r in vocab.keys(): merge = f"{piece_l}{piece_r}" piece_id = vocab.get(merge, None) if piece_id: merges += [(piece_l, piece_r, piece_id)] merges = sorted(merges, key=lambda val: val[2]) merges = [(val[0], val[1]) for val in merges] return vocab, merges class YouTokenToMeExtractor: """ Extractor implementation for YouTokenToMe trained models format. Model are as follow: vocab_size nb_merges piece piece_id ...(repeated vocab_size) piece_id_left piece_id_right piece_id ...(repeated nb merges) """ def __init__(self, model: str): self._model = model def extract(self) -> Tuple[Dict[str, int], List[Tuple]]: with open(self._model, "r") as model_f: # Retrieve information nb_pieces, nb_merges = map(int, model_f.readline().split()) vocab, merges = {}, [] # Vocab for _ in trange(nb_pieces): piece, piece_id = map(int, model_f.readline().split()) vocab[piece_id] = chr(piece) # Merges for _ in trange(nb_merges): piece_id_l, piece_id_r, piece = map(int, model_f.readline().split()) piece_l, piece_r = vocab[piece_id_l], vocab[piece_id_r] vocab[piece] = f"{piece_l}{piece_r}" merges += [(piece_l, piece_r)] # Special tokens unk, pad, bos, eos = map(int, model_f.readline().split()) vocab[unk] = "<unk>" vocab[pad] = "<pad>" vocab[bos] = "<bos>" vocab[eos] = "<eos>" # Invert key and value for vocab vocab = dict(zip(vocab.values(), vocab.keys())) return vocab, merges if __name__ == "__main__": parser = ArgumentParser("SentencePiece vocab extractor") parser.add_argument( "--provider", type=str, required=True, choices=["sentencepiece", "youtokentome"], help="Indicate the format of the file.", ) parser.add_argument("--model", type=str, required=True, help="SentencePiece model to extract vocab from.") parser.add_argument( "--vocab-output-path", type=str, required=True, help="Path where the vocab.json file will be extracted", ) parser.add_argument( "--merges-output-path", type=str, required=True, help="Path where the merges file will be extracted", ) # Parse cli arguments args = parser.parse_args() try: if args.model.startswith("http"): # Saving model with NamedTemporaryFile("wb", delete=False) as f: logger.info("Writing content from {} to {}".format(args.model, f.name)) response = get(args.model, allow_redirects=True) f.write(response.content) args.remote_model = args.model args.model = f.name # Allocate extractor extractor = SentencePieceExtractor if args.provider == "sentencepiece" else YouTokenToMeExtractor extractor = extractor(args.model) logger.info(f"Using {type(extractor).__name__}") # Open output files and let's extract model information with open(args.vocab_output_path, "w") as vocab_f: with open(args.merges_output_path, "w") as merges_f: # Do the extraction vocab, 
merges = extractor.extract() # Save content dump(vocab, vocab_f) merges_f.writelines(map(lambda x: f"{x[0]} {x[1]}{linesep}", merges)) finally: # If model was downloaded from internet we need to cleanup the tmp folder. if hasattr(args, "remote_model") and exists(args.model): remove(args.model)
tokenizers/bindings/python/scripts/sentencepiece_extractor.py/0
{ "file_path": "tokenizers/bindings/python/scripts/sentencepiece_extractor.py", "repo_id": "tokenizers", "token_count": 2231 }
235
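The script above is meant to be driven from its CLI, but the extractor classes can also be used directly. A minimal sketch, assuming the file is importable as `sentencepiece_extractor` and that `spm.model` is a placeholder path to a trained SentencePiece model:

```python
# Minimal sketch: run the SentencePiece extractor as a library instead of the CLI.
# "spm.model" is a placeholder; any trained SentencePiece model file works.
from json import dump

from sentencepiece_extractor import SentencePieceExtractor

vocab, merges = SentencePieceExtractor("spm.model").extract()

with open("vocab.json", "w") as vocab_f:
    dump(vocab, vocab_f)
with open("merges.txt", "w") as merges_f:
    merges_f.writelines(f"{left} {right}\n" for left, right in merges)
```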
use super::regex::PyRegex; use super::{DestroyPtr, RefMutContainer, RefMutGuard}; use crate::error::ToPyResult; use pyo3::exceptions; use pyo3::prelude::*; use pyo3::types::*; use tk::normalizer::{char_to_bytes, NormalizedString, Range, SplitDelimiterBehavior}; use tk::pattern::Pattern; /// Represents a Pattern as used by `NormalizedString` #[derive(Clone, FromPyObject)] pub enum PyPattern<'p> { #[pyo3(annotation = "str")] Str(&'p str), #[pyo3(annotation = "tokenizers.Regex")] Regex(Py<PyRegex>), // TODO: Add the compatibility for Fn(char) -> bool } impl Pattern for PyPattern<'_> { fn find_matches(&self, inside: &str) -> tk::Result<Vec<(tk::Offsets, bool)>> { match self { PyPattern::Str(s) => { let mut chars = s.chars(); if let (Some(c), None) = (chars.next(), chars.next()) { c.find_matches(inside) } else { s.find_matches(inside) } } PyPattern::Regex(r) => { Python::with_gil(|py| (&r.borrow(py).inner).find_matches(inside)) } } } } impl From<PyPattern<'_>> for tk::normalizers::replace::ReplacePattern { fn from(pattern: PyPattern<'_>) -> Self { match pattern { PyPattern::Str(s) => Self::String(s.to_owned()), PyPattern::Regex(r) => Python::with_gil(|py| Self::Regex(r.borrow(py).pattern.clone())), } } } impl From<PyPattern<'_>> for tk::pre_tokenizers::split::SplitPattern { fn from(pattern: PyPattern<'_>) -> Self { match pattern { PyPattern::Str(s) => Self::String(s.to_owned()), PyPattern::Regex(r) => Python::with_gil(|py| Self::Regex(r.borrow(py).pattern.clone())), } } } #[derive(Debug, Clone, FromPyObject)] pub enum PyRange<'s> { #[pyo3(annotation = "int")] Single(isize), #[pyo3(annotation = "Tuple[uint, uint]")] Range(usize, usize), #[pyo3(annotation = "slice")] Slice(&'s PySlice), } impl PyRange<'_> { pub fn to_range(&self, max_len: usize) -> PyResult<std::ops::Range<usize>> { match self { PyRange::Single(i) => { if i.is_negative() { let i = -i as usize; if i > max_len { Err(exceptions::PyValueError::new_err(format!( "{} is bigger than max len", i ))) } else { Ok(max_len - i..max_len - i + 1) } } else { let i = *i as usize; Ok(i..i + 1) } } PyRange::Range(s, e) => Ok(*s..*e), PyRange::Slice(s) => { let r = s.indices(max_len as std::os::raw::c_long)?; Ok(r.start as usize..r.stop as usize) } } } } #[derive(Clone)] pub struct PySplitDelimiterBehavior(pub SplitDelimiterBehavior); impl FromPyObject<'_> for PySplitDelimiterBehavior { fn extract(obj: &PyAny) -> PyResult<Self> { let s = obj.extract::<&str>()?; Ok(Self(match s { "removed" => Ok(SplitDelimiterBehavior::Removed), "isolated" => Ok(SplitDelimiterBehavior::Isolated), "merged_with_previous" => Ok(SplitDelimiterBehavior::MergedWithPrevious), "merged_with_next" => Ok(SplitDelimiterBehavior::MergedWithNext), "contiguous" => Ok(SplitDelimiterBehavior::Contiguous), _ => Err(exceptions::PyValueError::new_err( "Wrong value for SplitDelimiterBehavior, expected one of: \ `removed, isolated, merged_with_previous, merged_with_next, contiguous`", )), }?)) } } impl From<PySplitDelimiterBehavior> for SplitDelimiterBehavior { fn from(v: PySplitDelimiterBehavior) -> Self { v.0 } } fn filter(normalized: &mut NormalizedString, func: &PyAny) -> PyResult<()> { let err = "`filter` expect a callable with the signature: `fn(char) -> bool`"; if !func.is_callable() { Err(exceptions::PyTypeError::new_err(err)) } else { normalized.filter(|c| { func.call1((c.to_string(),)) .expect(err) .extract() .expect(err) }); Ok(()) } } fn for_each(normalized: &NormalizedString, func: &PyAny) -> PyResult<()> { let err = "`for_each` expect a callable with the signature: 
`fn(char)`"; if !func.is_callable() { Err(exceptions::PyTypeError::new_err(err)) } else { normalized.for_each(|c| { func.call1((c.to_string(),)).expect(err); }); Ok(()) } } fn map(normalized: &mut NormalizedString, func: &PyAny) -> PyResult<()> { let err = "`map` expect a callable with the signature: `fn(char) -> char`"; if !func.is_callable() { Err(exceptions::PyTypeError::new_err(err)) } else { normalized.map(|c| { let c: &str = func .call1((c.to_string(),)) .expect(err) .extract() .expect(err); c.chars().next().expect(err) }); Ok(()) } } fn slice( normalized: &NormalizedString, range: &PyRange<'_>, ) -> PyResult<Option<PyNormalizedString>> { let n_char = normalized.len(); let char_range = range.to_range(n_char)?; Ok( char_to_bytes(normalized.get(), char_range).and_then(|bytes_range| { normalized .slice(Range::Normalized(bytes_range)) .map(|n| n.into()) }), ) } /// NormalizedString /// /// A NormalizedString takes care of modifying an "original" string, to obtain a "normalized" one. /// While making all the requested modifications, it keeps track of the alignment information /// between the two versions of the string. /// /// Args: /// sequence: str: /// The string sequence used to initialize this NormalizedString #[pyclass(module = "tokenizers", name = "NormalizedString")] #[derive(Clone)] pub struct PyNormalizedString { pub(crate) normalized: NormalizedString, } #[pymethods] impl PyNormalizedString { #[new] #[pyo3(text_signature = None)] fn new(s: &str) -> Self { NormalizedString::from(s).into() } /// The normalized part of the string #[getter] fn get_normalized(&self) -> &str { self.normalized.get() } #[getter] fn get_original(&self) -> &str { self.normalized.get_original() } /// Runs the NFD normalization #[pyo3(text_signature = "(self)")] fn nfd(&mut self) { self.normalized.nfd(); } /// Runs the NFKD normalization #[pyo3(text_signature = "(self)")] fn nfkd(&mut self) { self.normalized.nfkd(); } /// Runs the NFC normalization #[pyo3(text_signature = "(self)")] fn nfc(&mut self) { self.normalized.nfc(); } /// Runs the NFKC normalization #[pyo3(text_signature = "(self)")] fn nfkc(&mut self) { self.normalized.nfkc(); } /// Lowercase the string #[pyo3(text_signature = "(self)")] fn lowercase(&mut self) { self.normalized.lowercase(); } /// Uppercase the string #[pyo3(text_signature = "(self)")] fn uppercase(&mut self) { self.normalized.uppercase(); } /// Prepend the given sequence to the string #[pyo3(text_signature = "(self, s)")] fn prepend(&mut self, s: &str) { self.normalized.prepend(s); } /// Append the given sequence to the string #[pyo3(text_signature = "(self, s)")] fn append(&mut self, s: &str) { self.normalized.append(s); } /// Strip the left of the string #[pyo3(text_signature = "(self)")] fn lstrip(&mut self) { self.normalized.lstrip(); } /// Strip the right of the string #[pyo3(text_signature = "(self)")] fn rstrip(&mut self) { self.normalized.rstrip(); } /// Strip both ends of the string #[pyo3(text_signature = "(self)")] fn strip(&mut self) { self.normalized.strip(); } /// Clears the string #[pyo3(text_signature = "(self)")] fn clear(&mut self) { self.normalized.clear(); } /// Slice the string using the given range #[pyo3(text_signature = "(self, range)")] fn slice(&self, range: PyRange) -> PyResult<Option<PyNormalizedString>> { slice(&self.normalized, &range) } /// Filter each character of the string using the given func #[pyo3(text_signature = "(self, func)")] fn filter(&mut self, func: &PyAny) -> PyResult<()> { filter(&mut self.normalized, func) } /// Calls the given 
function for each character of the string #[pyo3(text_signature = "(self, func)")] fn for_each(&self, func: &PyAny) -> PyResult<()> { for_each(&self.normalized, func) } /// Calls the given function for each character of the string /// /// Replaces each character of the string using the returned value. Each /// returned value **must** be a str of length 1 (ie a character). #[pyo3(text_signature = "(self, func)")] fn map(&mut self, func: &PyAny) -> PyResult<()> { map(&mut self.normalized, func) } /// Split the NormalizedString using the given pattern and the specified behavior /// /// Args: /// pattern: Pattern: /// A pattern used to split the string. Usually a string or a regex built with `tokenizers.Regex` /// /// behavior: SplitDelimiterBehavior: /// The behavior to use when splitting. /// Choices: "removed", "isolated", "merged_with_previous", "merged_with_next", /// "contiguous" /// /// Returns: /// A list of NormalizedString, representing each split #[pyo3(text_signature = "(self, pattern, behavior)")] fn split( &mut self, pattern: PyPattern, behavior: PySplitDelimiterBehavior, ) -> PyResult<Vec<PyNormalizedString>> { Ok(ToPyResult(self.normalized.split(pattern, behavior.into())) .into_py()? .into_iter() .map(|n| n.into()) .collect()) } /// Replace the content of the given pattern with the provided content /// /// Args: /// pattern: Pattern: /// A pattern used to match the string. Usually a string or a Regex /// /// content: str: /// The content to be used as replacement #[pyo3(text_signature = "(self, pattern, content)")] fn replace(&mut self, pattern: PyPattern, content: &str) -> PyResult<()> { ToPyResult(self.normalized.replace(pattern, content)).into() } fn __repr__(&self) -> String { format!( r#"NormalizedString(original="{}", normalized="{}")"#, self.normalized.get_original(), self.normalized.get() ) } fn __str__(&self) -> &str { self.normalized.get() } fn __getitem__(&self, range: PyRange<'_>) -> PyResult<Option<PyNormalizedString>> { slice(&self.normalized, &range) } } impl From<NormalizedString> for PyNormalizedString { fn from(normalized: NormalizedString) -> Self { Self { normalized } } } impl From<PyNormalizedString> for NormalizedString { fn from(normalized: PyNormalizedString) -> Self { normalized.normalized } } #[pyclass(module = "tokenizers", name = "NormalizedStringRefMut")] #[derive(Clone)] pub struct PyNormalizedStringRefMut { inner: RefMutContainer<NormalizedString>, } impl DestroyPtr for PyNormalizedStringRefMut { fn destroy(&mut self) { self.inner.destroy(); } } impl PyNormalizedStringRefMut { pub fn new(normalized: &mut NormalizedString) -> RefMutGuard<Self> { RefMutGuard::new(Self { inner: RefMutContainer::new(normalized), }) } pub fn destroyed_error() -> PyErr { exceptions::PyException::new_err("Cannot use a NormalizedStringRefMut outside `normalize`") } /// Provides a way to access a reference to the underlying NormalizedString pub fn map_as_ref<F: FnOnce(&NormalizedString) -> U, U>(&self, f: F) -> PyResult<U> { self.inner .map(f) .ok_or_else(PyNormalizedStringRefMut::destroyed_error) } /// Provides a way to access a mutable reference to the underlying NormalizedString pub fn map_as_mut<F: FnOnce(&mut NormalizedString) -> U, U>(&mut self, f: F) -> PyResult<U> { self.inner .map_mut(f) .ok_or_else(PyNormalizedStringRefMut::destroyed_error) } } #[pymethods] impl PyNormalizedStringRefMut { #[getter] fn get_normalized(&self) -> PyResult<String> { self.inner .map(|n| n.get().to_owned()) .ok_or_else(PyNormalizedStringRefMut::destroyed_error) } #[getter] fn 
get_original(&self) -> PyResult<String> { self.inner .map(|n| n.get_original().to_owned()) .ok_or_else(PyNormalizedStringRefMut::destroyed_error) } fn nfd(&mut self) -> PyResult<()> { self.inner .map_mut(|n| { n.nfd(); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn nfkd(&mut self) -> PyResult<()> { self.inner .map_mut(|n| { n.nfkd(); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn nfc(&mut self) -> PyResult<()> { self.inner .map_mut(|n| { n.nfc(); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn nfkc(&mut self) -> PyResult<()> { self.inner .map_mut(|n| { n.nfkc(); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn lowercase(&mut self) -> PyResult<()> { self.inner .map_mut(|n| { n.lowercase(); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn uppercase(&mut self) -> PyResult<()> { self.inner .map_mut(|n| { n.uppercase(); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn prepend(&mut self, s: &str) -> PyResult<()> { self.inner .map_mut(|n| { n.prepend(s); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn append(&mut self, s: &str) -> PyResult<()> { self.inner .map_mut(|n| { n.append(s); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn lstrip(&mut self) -> PyResult<()> { self.inner .map_mut(|n| { n.lstrip(); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn rstrip(&mut self) -> PyResult<()> { self.inner .map_mut(|n| { n.rstrip(); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn strip(&mut self) -> PyResult<()> { self.inner .map_mut(|n| { n.strip(); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn clear(&mut self) -> PyResult<()> { self.inner .map_mut(|n| { n.clear(); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn slice(&self, range: PyRange) -> PyResult<Option<PyNormalizedString>> { self.inner .map(|n| slice(n, &range)) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)? } fn filter(&mut self, func: &PyAny) -> PyResult<()> { self.inner .map_mut(|n| filter(n, func)) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)??; Ok(()) } fn for_each(&self, func: &PyAny) -> PyResult<()> { self.inner .map(|n| for_each(n, func)) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)??; Ok(()) } fn map(&mut self, func: &PyAny) -> PyResult<()> { self.inner .map_mut(|n| map(n, func)) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)??; Ok(()) } fn split( &mut self, pattern: PyPattern, behavior: PySplitDelimiterBehavior, ) -> PyResult<Vec<PyNormalizedString>> { Ok(ToPyResult( self.inner .map_mut(|n| n.split(pattern, behavior.into())) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?, ) .into_py()? .into_iter() .map(|n| n.into()) .collect()) } fn replace(&mut self, pattern: PyPattern, content: &str) -> PyResult<()> { ToPyResult( self.inner .map_mut(|n| n.replace(pattern, content)) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?, ) .into() } }
tokenizers/bindings/python/src/utils/normalization.rs/0
{ "file_path": "tokenizers/bindings/python/src/utils/normalization.rs", "repo_id": "tokenizers", "token_count": 8467 }
236
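The Rust module above backs the `tokenizers.NormalizedString` Python class. A rough usage sketch of the methods it documents, assuming the `tokenizers` package is installed:

```python
# Rough sketch of the NormalizedString API exposed by the bindings above.
from tokenizers import NormalizedString

n = NormalizedString("Héllò WORLD")
n.nfd()                                              # decompose accented characters
n.filter(lambda c: not (0x300 <= ord(c) <= 0x36F))   # drop the combining marks
n.lowercase()
print(n.normalized)   # normalized view, e.g. "hello world"
print(n.original)     # the untouched original string
```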
from tokenizers import Tokenizer from ..utils import data_dir, doc_pipeline_bert_tokenizer, doc_wiki_tokenizer disable_printing = True original_print = print def print(*args, **kwargs): if not disable_printing: original_print(*args, **kwargs) class TestPipeline: def test_pipeline(self, doc_wiki_tokenizer): try: # START reload_tokenizer from tokenizers import Tokenizer tokenizer = Tokenizer.from_file("data/tokenizer-wiki.json") # END reload_tokenizer except Exception: tokenizer = Tokenizer.from_file(doc_wiki_tokenizer) # START setup_normalizer from tokenizers import normalizers from tokenizers.normalizers import NFD, StripAccents normalizer = normalizers.Sequence([NFD(), StripAccents()]) # END setup_normalizer # START test_normalizer normalizer.normalize_str("Héllò hôw are ü?") # "Hello how are u?" # END test_normalizer assert normalizer.normalize_str("Héllò hôw are ü?") == "Hello how are u?" # START replace_normalizer tokenizer.normalizer = normalizer # END replace_normalizer # START setup_pre_tokenizer from tokenizers.pre_tokenizers import Whitespace pre_tokenizer = Whitespace() pre_tokenizer.pre_tokenize_str("Hello! How are you? I'm fine, thank you.") # [("Hello", (0, 5)), ("!", (5, 6)), ("How", (7, 10)), ("are", (11, 14)), ("you", (15, 18)), # ("?", (18, 19)), ("I", (20, 21)), ("'", (21, 22)), ('m', (22, 23)), ("fine", (24, 28)), # (",", (28, 29)), ("thank", (30, 35)), ("you", (36, 39)), (".", (39, 40))] # END setup_pre_tokenizer assert pre_tokenizer.pre_tokenize_str("Hello! How are you? I'm fine, thank you.") == [ ("Hello", (0, 5)), ("!", (5, 6)), ("How", (7, 10)), ("are", (11, 14)), ("you", (15, 18)), ("?", (18, 19)), ("I", (20, 21)), ("'", (21, 22)), ("m", (22, 23)), ("fine", (24, 28)), (",", (28, 29)), ("thank", (30, 35)), ("you", (36, 39)), (".", (39, 40)), ] # START combine_pre_tokenizer from tokenizers import pre_tokenizers from tokenizers.pre_tokenizers import Digits pre_tokenizer = pre_tokenizers.Sequence([Whitespace(), Digits(individual_digits=True)]) pre_tokenizer.pre_tokenize_str("Call 911!") # [("Call", (0, 4)), ("9", (5, 6)), ("1", (6, 7)), ("1", (7, 8)), ("!", (8, 9))] # END combine_pre_tokenizer assert pre_tokenizer.pre_tokenize_str("Call 911!") == [ ("Call", (0, 4)), ("9", (5, 6)), ("1", (6, 7)), ("1", (7, 8)), ("!", (8, 9)), ] # START replace_pre_tokenizer tokenizer.pre_tokenizer = pre_tokenizer # END replace_pre_tokenizer # START setup_processor from tokenizers.processors import TemplateProcessing tokenizer.post_processor = TemplateProcessing( single="[CLS] $A [SEP]", pair="[CLS] $A [SEP] $B:1 [SEP]:1", special_tokens=[("[CLS]", 1), ("[SEP]", 2)], ) # END setup_processor # START test_decoding output = tokenizer.encode("Hello, y'all! How are you 😁 ?") print(output.ids) # [1, 27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35, 2] tokenizer.decode([1, 27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35, 2]) # "Hello , y ' all ! How are you ?" # END test_decoding assert output.ids == [1, 27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35, 2] assert ( tokenizer.decode([1, 27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35, 2]) == "Hello , y ' all ! How are you ?" 
) @staticmethod def slow_train(): # START bert_setup_tokenizer from tokenizers import Tokenizer from tokenizers.models import WordPiece bert_tokenizer = Tokenizer(WordPiece(unk_token="[UNK]")) # END bert_setup_tokenizer # START bert_setup_normalizer from tokenizers import normalizers from tokenizers.normalizers import NFD, Lowercase, StripAccents bert_tokenizer.normalizer = normalizers.Sequence([NFD(), Lowercase(), StripAccents()]) # END bert_setup_normalizer # START bert_setup_pre_tokenizer from tokenizers.pre_tokenizers import Whitespace bert_tokenizer.pre_tokenizer = Whitespace() # END bert_setup_pre_tokenizer # START bert_setup_processor from tokenizers.processors import TemplateProcessing bert_tokenizer.post_processor = TemplateProcessing( single="[CLS] $A [SEP]", pair="[CLS] $A [SEP] $B:1 [SEP]:1", special_tokens=[ ("[CLS]", 1), ("[SEP]", 2), ], ) # END bert_setup_processor # START bert_train_tokenizer from tokenizers.trainers import WordPieceTrainer trainer = WordPieceTrainer(vocab_size=30522, special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"]) files = [f"data/wikitext-103-raw/wiki.{split}.raw" for split in ["test", "train", "valid"]] bert_tokenizer.train(files, trainer) bert_tokenizer.save("data/bert-wiki.json") # END bert_train_tokenizer def test_bert_example(self, doc_pipeline_bert_tokenizer): try: bert_tokenizer = Tokenizer.from_file("data/bert-wiki.json") except Exception: bert_tokenizer = Tokenizer.from_file(doc_pipeline_bert_tokenizer) # START bert_test_decoding output = bert_tokenizer.encode("Welcome to the 🤗 Tokenizers library.") print(output.tokens) # ["[CLS]", "welcome", "to", "the", "[UNK]", "tok", "##eni", "##zer", "##s", "library", ".", "[SEP]"] bert_tokenizer.decode(output.ids) # "welcome to the tok ##eni ##zer ##s library ." # END bert_test_decoding assert bert_tokenizer.decode(output.ids) == "welcome to the tok ##eni ##zer ##s library ." # START bert_proper_decoding from tokenizers import decoders bert_tokenizer.decoder = decoders.WordPiece() bert_tokenizer.decode(output.ids) # "welcome to the tokenizers library." # END bert_proper_decoding assert bert_tokenizer.decode(output.ids) == "welcome to the tokenizers library." if __name__ == "__main__": import os from urllib import request from zipfile import ZipFile disable_printing = False if not os.path.isdir("data/wikitext-103-raw"): print("Downloading wikitext-103...") wiki_text, _ = request.urlretrieve( "https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip" ) with ZipFile(wiki_text, "r") as z: print("Unzipping in data...") z.extractall("data") print("Now training...") TestPipeline.slow_train()
tokenizers/bindings/python/tests/documentation/test_pipeline.py/0
{ "file_path": "tokenizers/bindings/python/tests/documentation/test_pipeline.py", "repo_id": "tokenizers", "token_count": 3351 }
237
# Encode Inputs <tokenizerslangcontent> <python> These types represent all the different kinds of input that a [`~tokenizers.Tokenizer`] accepts when using [`~tokenizers.Tokenizer.encode_batch`]. ## TextEncodeInput[[[[tokenizers.TextEncodeInput]]]] <code>tokenizers.TextEncodeInput</code> Represents a textual input for encoding. Can be either: - A single sequence: [TextInputSequence](/docs/tokenizers/api/input-sequences#tokenizers.TextInputSequence) - A pair of sequences: - A Tuple of [TextInputSequence](/docs/tokenizers/api/input-sequences#tokenizers.TextInputSequence) - Or a List of [TextInputSequence](/docs/tokenizers/api/input-sequences#tokenizers.TextInputSequence) of size 2 alias of `Union[str, Tuple[str, str], List[str]]`. ## PreTokenizedEncodeInput[[[[tokenizers.PreTokenizedEncodeInput]]]] <code>tokenizers.PreTokenizedEncodeInput</code> Represents a pre-tokenized input for encoding. Can be either: - A single sequence: [PreTokenizedInputSequence](/docs/tokenizers/api/input-sequences#tokenizers.PreTokenizedInputSequence) - A pair of sequences: - A Tuple of [PreTokenizedInputSequence](/docs/tokenizers/api/input-sequences#tokenizers.PreTokenizedInputSequence) - Or a List of [PreTokenizedInputSequence](/docs/tokenizers/api/input-sequences#tokenizers.PreTokenizedInputSequence) of size 2 alias of `Union[List[str], Tuple[str], Tuple[Union[List[str], Tuple[str]], Union[List[str], Tuple[str]]], List[Union[List[str], Tuple[str]]]]`. ## EncodeInput[[[[tokenizers.EncodeInput]]]] <code>tokenizers.EncodeInput</code> Represents all the possible types of input for encoding. Can be: - When `is_pretokenized=False`: [TextEncodeInput](#tokenizers.TextEncodeInput) - When `is_pretokenized=True`: [PreTokenizedEncodeInput](#tokenizers.PreTokenizedEncodeInput) alias of `Union[str, Tuple[str, str], List[str], Tuple[str], Tuple[Union[List[str], Tuple[str]], Union[List[str], Tuple[str]]], List[Union[List[str], Tuple[str]]]]`. </python> <rust> The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website. </rust> <node> The node API has not been documented yet. </node> </tokenizerslangcontent>
tokenizers/docs/source-doc-builder/api/encode-inputs.mdx/0
{ "file_path": "tokenizers/docs/source-doc-builder/api/encode-inputs.mdx", "repo_id": "tokenizers", "token_count": 716 }
238
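The aliases above only describe shapes; a short hedged example of feeding both shapes to `encode_batch`, assuming a local tokenizer file (the `tokenizer.json` path is a placeholder):

```python
# Illustration of the input shapes documented above; "tokenizer.json" is a placeholder.
from tokenizers import Tokenizer

tokenizer = Tokenizer.from_file("tokenizer.json")

# TextEncodeInput: plain sequences and (sequence, pair) tuples
tokenizer.encode_batch(["Hello world", ("Question?", "Answer.")])

# PreTokenizedEncodeInput: the words are already split
tokenizer.encode_batch([["Hello", "world"]], is_pretokenized=True)
```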
from collections import defaultdict, abc from typing import cast from docutils import nodes from docutils.parsers.rst import Directive import sphinx from sphinx.locale import _ from sphinx.util.docutils import SphinxDirective from sphinx.errors import ExtensionError from conf import languages as LANGUAGES logger = sphinx.util.logging.getLogger(__name__) GLOBALNAME = "$GLOBAL$" def update(d, u): for k, v in u.items(): if isinstance(v, abc.Mapping): d[k] = update(d.get(k, {}), v) else: d[k] = v return d class EntityNode(nodes.General, nodes.Element): pass class EntitiesNode(nodes.General, nodes.Element): pass class AllEntities: def __init__(self): self.entities = defaultdict(dict) @classmethod def install(cls, env): if not hasattr(env, "entity_all_entities"): entities = cls() env.entity_all_entities = entities return env.entity_all_entities def merge(self, other): self.entities.update(other.entities) def purge(self, docname): for env_docname in [GLOBALNAME, docname]: self.entities[env_docname] = dict( [ (name, entity) for name, entity in self.entities[env_docname].items() if entity["docname"] != docname ] ) def _extract_entities(self, nodes): pass def _extract_options(self, nodes): pass def _add_entities(self, entities, language, is_global, docname): scope = GLOBALNAME if is_global else docname for entity in entities: name = f'{language}-{entity["name"]}' content = entity["content"] if name in self.entities[scope]: logger.warning( f'Entity "{name}" has already been defined{" globally" if is_global else ""}', location=docname, ) self.entities[scope][name] = {"docname": docname, "content": content} def _extract_global(self, nodes): for node in nodes: if node.tagname != "field": raise Exception(f"Expected a field, found {node.tagname}") name, _ = node.children if name.tagname != "field_name": raise Exception(f"Expected a field name here, found {name_node.tagname}") if str(name.children[0]) == "global": return True def _extract_entities(self, nodes): entities = [] for node in nodes: if node.tagname != "definition_list_item": raise Exception(f"Expected a list item here, found {node.tagname}") name_node, content_node = node.children if name_node.tagname != "term": raise Exception(f"Expected a term here, found {name_node.tagname}") if content_node.tagname != "definition": raise Exception(f"Expected a definition here, found {content_node.tagname}") name = str(name_node.children[0]) if len(content_node.children) == 1 and content_node.children[0].tagname == "paragraph": content = content_node.children[0].children[0] else: content = content_node entities.append({"name": name, "content": content}) return entities def extract(self, node, docname): is_global = False entities = [] language = None for node in node.children: if language is None and node.tagname != "paragraph": raise Exception(f"Expected language name:\n.. entities:: <LANGUAGE>") elif language is None and node.tagname == "paragraph": language = str(node.children[0]) if language not in LANGUAGES: raise Exception( f'Unknown language "{language}. 
Might be missing a newline after language"' ) elif node.tagname == "field_list": is_global = self._extract_global(node.children) elif node.tagname == "definition_list": entities.extend(self._extract_entities(node.children)) else: raise Exception(f"Expected a list of terms/options, found {node.tagname}") self._add_entities(entities, language, is_global, docname) def resolve_pendings(self, app): env = app.builder.env updates = defaultdict(dict) for env_docname in self.entities.keys(): for name, entity in self.entities[env_docname].items(): docname = entity["docname"] node = entity["content"] for node in node.traverse(sphinx.addnodes.pending_xref): contnode = cast(nodes.TextElement, node[0].deepcopy()) newnode = None typ = node["reftype"] target = node["reftarget"] refdoc = node.get("refdoc", docname) domain = None try: if "refdomain" in node and node["refdomain"]: # let the domain try to resolve the reference try: domain = env.domains[node["refdomain"]] except KeyError as exc: raise NoUri(target, typ) from exc newnode = domain.resolve_xref( env, refdoc, app.builder, typ, target, node, contnode ) except NoUri: newnode = contnode updates[env_docname][name] = { "docname": docname, "content": newnode or contnode, } update(self.entities, updates) def get(self, language, name, docname): name = f"{language}-{name}" if name in self.entities[docname]: return self.entities[docname][name] elif name in self.entities[GLOBALNAME]: return self.entities[GLOBALNAME][name] else: return None class EntitiesDirective(SphinxDirective): has_content = True def run(self): content = nodes.definition_list() self.state.nested_parse(self.content, self.content_offset, content) try: entities = AllEntities.install(self.env) entities.extract(content, self.env.docname) except Exception as err: raise self.error(f'Malformed directive "entities": {err}') return [] def entity_role(name, rawtext, text, lineno, inliner, options={}, content=[]): node = EntityNode() node.entity = text return [node], [] def process_entity_nodes(app, doctree, docname): """ Replace all the entities by their content """ env = app.builder.env entities = AllEntities.install(env) entities.resolve_pendings(app) language = None try: language = next(l for l in LANGUAGES if l in app.tags) except Exception: logger.warning(f"No language tag specified, not resolving entities in {docname}") for node in doctree.traverse(EntityNode): if language is None: node.replace_self(nodes.Text(_(node.entity), _(node.entity))) else: entity = entities.get(language, node.entity, docname) if entity is None: node.replace_self(nodes.Text(_(node.entity), _(node.entity))) logger.warning(f'Entity "{node.entity}" has not been defined', location=node) else: node.replace_self(entity["content"]) def purge_entities(app, env, docname): """ Purge any entity that comes from the given docname """ entities = AllEntities.install(env) entities.purge(docname) def merge_entities(app, env, docnames, other): """ Merge multiple environment entities """ entities = AllEntities.install(env) other_entities = AllEntities.install(other) entities.merge(other_entities) def setup(app): app.add_node(EntityNode) app.add_node(EntitiesNode) app.add_directive("entities", EntitiesDirective) app.add_role("entity", entity_role) app.connect("doctree-resolved", process_entity_nodes) app.connect("env-merge-info", merge_entities) app.connect("env-purge-doc", purge_entities) return { "version": "0.1", "parallel_read_safe": True, "parallel_write_safe": True, }
tokenizers/docs/source/_ext/entities.py/0
{ "file_path": "tokenizers/docs/source/_ext/entities.py", "repo_id": "tokenizers", "token_count": 4032 }
239
.. entities:: python :global: class class classmethod class method Tokenizer :class:`~tokenizers.Tokenizer` Tokenizer.train :meth:`~tokenizers.Tokenizer.train` Tokenizer.save :meth:`~tokenizers.Tokenizer.save` Tokenizer.from_file :meth:`~tokenizers.Tokenizer.from_file` Tokenizer.encode :meth:`~tokenizers.Tokenizer.encode` Tokenizer.encode_batch :meth:`~tokenizers.Tokenizer.encode_batch` Tokenizer.decode :meth:`~tokenizers.Tokenizer.decode` Tokenizer.decode_batch :meth:`~tokenizers.Tokenizer.decode_batch` Tokenizer.token_to_id :meth:`~tokenizers.Tokenizer.token_to_id` Tokenizer.enable_padding :meth:`~tokenizers.Tokenizer.enable_padding` Encoding :class:`~tokenizers.Encoding` TemplateProcessing :class:`~tokenizers.processors.TemplateProcessing` Normalizer :class:`~tokenizers.normalizers.Normalizer` normalizers.Sequence :class:`~tokenizers.normalizers.Sequence` pre_tokenizers.Whitespace :class:`~tokenizers.pre_tokenizers.Whitespace` PreTokenizer :class:`~tokenizers.pre_tokenizers.PreTokenizer` models.BPE :class:`~tokenizers.models.BPE` models.Unigram :class:`~tokenizers.models.Unigram` models.WordLevel :class:`~tokenizers.models.WordLevel` models.WordPiece :class:`~tokenizers.models.WordPiece` Decoder :class:`~tokenizers.decoders.Decoder` .. entities:: rust :global: class struct classmethod static method Tokenizer :rust_struct:`~tokenizers::tokenizer::Tokenizer` Tokenizer.train :rust_meth:`~tokenizers::tokenizer::Tokenizer::train` Tokenizer.save :rust_meth:`~tokenizers::tokenizer::Tokenizer::save` Tokenizer.from_file :rust_meth:`~tokenizers::tokenizer::Tokenizer::from_file` Tokenizer.encode :rust_meth:`~tokenizers::tokenizer::Tokenizer::encode` Tokenizer.encode_batch :rust_meth:`~tokenizers::tokenizer::Tokenizer::encode_batch` Tokenizer.decode :rust_meth:`~tokenizers::tokenizer::Tokenizer::decode` Tokenizer.decode_batch :rust_meth:`~tokenizers::tokenizer::Tokenizer::decode_batch` Tokenizer.token_to_id :rust_meth:`~tokenizers::tokenizer::Tokenizer::token_to_id` Tokenizer.enable_padding :rust_meth:`~tokenizers::tokenizer::Tokenizer::enable_padding` Encoding :rust_struct:`~tokenizers::tokenizer::Encoding` TemplateProcessing :rust_struct:`~tokenizers::processors::template::TemplateProcessing` Normalizer :rust_trait:`~tokenizers::tokenizer::Normalizer` normalizers.Sequence :rust_struct:`~tokenizers::normalizers::utils::Sequence` pre_tokenizers.Whitespace :rust_struct:`~tokenizers::normalizers::whitespace::Whitespace` PreTokenizer :rust_trait:`~tokenizers::tokenizer::PreTokenizer` models.BPE :rust_struct:`~tokenizers::models::bpe::BPE` models.Unigram :rust_struct:`~tokenizers::models::unigram::Unigram` models.WordLevel :rust_struct:`~tokenizers::models::wordlevel::WordLevel` models.WordPiece :rust_struct:`~tokenizers::models::wordpiece::WordPiece` Decoder :rust_trait:`~tokenizers::tokenizer::Decoder` .. 
entities:: node :global: class class classmethod static method Tokenizer :obj:`Tokenizer` Tokenizer.train :obj:`Tokenizer.train()` Tokenizer.save :obj:`Tokenizer.save()` Tokenizer.from_file :obj:`Tokenizer.fromFile()` Tokenizer.encode :obj:`Tokenizer.encode()` Tokenizer.encode_batch :obj:`Tokenizer.encodeBatch()` Tokenizer.decode :obj:`Tokenizer.decode()` Tokenizer.decode_batch :obj:`Tokenizer.decodeBatch()` Tokenizer.token_to_id :obj:`Tokenizer.tokenToId()` Tokenizer.enable_padding :obj:`Tokenizer.setPadding()` Encoding :obj:`Encoding` TemplateProcessing :obj:`TemplateProcessing` Normalizer :obj:`Normalizer` normalizers.Sequence :obj:`Sequence` pre_tokenizers.Whitespace :obj:`Whitespace` PreTokenizer :obj:`PreTokenizer` models.BPE :obj:`BPE` models.Unigram :obj:`Unigram` models.WordLevel :obj:`WordLevel` models.WordPiece :obj:`WordPiece` Decoder :obj:`Decoder`
tokenizers/docs/source/entities.inc/0
{ "file_path": "tokenizers/docs/source/entities.inc", "repo_id": "tokenizers", "token_count": 2078 }
240
#[macro_use] extern crate criterion; mod common; use std::fs::File; use std::io::{BufRead, BufReader}; use std::path::Path; use criterion::Criterion; use tokenizers::models::bpe::{BpeTrainerBuilder, BPE}; use tokenizers::models::TrainerWrapper; use tokenizers::pre_tokenizers::byte_level::ByteLevel; use tokenizers::pre_tokenizers::whitespace::Whitespace; use tokenizers::tokenizer::{AddedToken, EncodeInput}; use tokenizers::Tokenizer; use common::{iter_bench_encode, iter_bench_encode_batch, iter_bench_train}; use std::ops::Deref; static BATCH_SIZE: usize = 1_000; fn create_gpt2_tokenizer(bpe: BPE) -> Tokenizer { let mut tokenizer = Tokenizer::new(bpe); tokenizer.with_pre_tokenizer(ByteLevel::default()); tokenizer.with_decoder(ByteLevel::default()); tokenizer.add_tokens(&[AddedToken::from("ing", false).single_word(false)]); tokenizer.add_special_tokens(&[AddedToken::from("[ENT]", true).single_word(true)]); tokenizer } fn bench_gpt2(c: &mut Criterion) { let bpe = BPE::from_file("data/gpt2-vocab.json", "data/gpt2-merges.txt") .build() .unwrap(); let tokenizer = create_gpt2_tokenizer(bpe); let mut lines: Vec<EncodeInput> = vec![]; let mut batches: Vec<Vec<EncodeInput>> = vec![vec![]]; for line in BufReader::new(File::open(Path::new("data/big.txt")).unwrap()).lines() { let line: EncodeInput = line.unwrap().into(); lines.push(line.clone()); if batches.last().unwrap().len() >= BATCH_SIZE { batches.push(vec![]); } batches.last_mut().unwrap().push(line); } c.bench_function("BPE GPT2 encode", |b| { b.iter_custom(|iters| iter_bench_encode(iters, tokenizer.deref(), &lines)) }); c.bench_function("BPE GPT2 encode batch", |b| { b.iter_custom(|iters| iter_bench_encode_batch(iters, tokenizer.deref(), &batches)) }); let bpe = BPE::from_file("data/gpt2-vocab.json", "data/gpt2-merges.txt") .cache_capacity(0) .build() .unwrap(); let tokenizer = create_gpt2_tokenizer(bpe); c.bench_function("BPE GPT2 encode, no cache", |b| { b.iter_custom(|iters| iter_bench_encode(iters, &tokenizer, &lines)) }); c.bench_function("BPE GPT2 encode batch, no cache", |b| { b.iter_custom(|iters| iter_bench_encode_batch(iters, &tokenizer, &batches)) }); } fn bench_train(c: &mut Criterion) { let mut trainer: TrainerWrapper = BpeTrainerBuilder::default() .show_progress(false) .build() .into(); let mut tokenizer = Tokenizer::new(BPE::default()).into_inner(); tokenizer.with_pre_tokenizer(Whitespace {}); c.bench_function("BPE Train vocabulary (small)", |b| { b.iter_custom(|iters| { iter_bench_train( iters, &mut tokenizer, &mut trainer, vec!["data/small.txt".to_string()], ) }) }); let mut tokenizer = Tokenizer::new(BPE::default()).into_inner(); tokenizer.with_pre_tokenizer(Whitespace {}); c.bench_function("BPE Train vocabulary (big)", |b| { b.iter_custom(|iters| { iter_bench_train( iters, &mut tokenizer, &mut trainer, vec!["data/big.txt".to_string()], ) }) }); } criterion_group! { name = benches; config = Criterion::default().sample_size(20); targets = bench_gpt2 } criterion_group! { name = benches_train; config = Criterion::default().sample_size(10); targets = bench_train } criterion_main!(benches, benches_train);
tokenizers/tokenizers/benches/bpe_benchmark.rs/0
{ "file_path": "tokenizers/tokenizers/benches/bpe_benchmark.rs", "repo_id": "tokenizers", "token_count": 1621 }
241
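The criterion benchmark above times GPT-2 BPE encoding in Rust. A rough Python-side analog, under the same data assumptions (`data/gpt2-vocab.json`, `data/gpt2-merges.txt`, and `data/big.txt` present locally), could look like this:

```python
# Rough Python analog of the encode-batch benchmark above (not criterion-accurate).
import time

from tokenizers import Tokenizer
from tokenizers.models import BPE
from tokenizers.pre_tokenizers import ByteLevel

tokenizer = Tokenizer(BPE.from_file("data/gpt2-vocab.json", "data/gpt2-merges.txt"))
tokenizer.pre_tokenizer = ByteLevel()

with open("data/big.txt", encoding="utf-8") as f:
    lines = [line.rstrip("\n") for line in f]

start = time.perf_counter()
tokenizer.encode_batch(lines)
print(f"encode_batch: {time.perf_counter() - start:.2f}s for {len(lines)} lines")
```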
<div align="center"> <h1><code>create-wasm-app</code></h1> <strong>An <code>npm init</code> template for kick starting a project that uses NPM packages containing Rust-generated WebAssembly and bundles them with Webpack.</strong> <p> <a href="https://travis-ci.org/rustwasm/create-wasm-app"><img src="https://img.shields.io/travis/rustwasm/create-wasm-app.svg?style=flat-square" alt="Build Status" /></a> </p> <h3> <a href="#usage">Usage</a> <span> | </span> <a href="https://discordapp.com/channels/442252698964721669/443151097398296587">Chat</a> </h3> <sub>Built with 🦀🕸 by <a href="https://rustwasm.github.io/">The Rust and WebAssembly Working Group</a></sub> </div> ## About This template is designed for depending on NPM packages that contain Rust-generated WebAssembly and using them to create a Website. * Want to create an NPM package with Rust and WebAssembly? [Check out `wasm-pack-template`.](https://github.com/rustwasm/wasm-pack-template) * Want to make a monorepo-style Website without publishing to NPM? Check out [`rust-webpack-template`](https://github.com/rustwasm/rust-webpack-template) and/or [`rust-parcel-template`](https://github.com/rustwasm/rust-parcel-template). ## 🚴 Usage ``` npm init wasm-app ``` ## 🔋 Batteries Included - `.gitignore`: ignores `node_modules` - `LICENSE-APACHE` and `LICENSE-MIT`: most Rust projects are licensed this way, so these are included for you - `README.md`: the file you are reading now! - `index.html`: a bare bones html document that includes the webpack bundle - `index.js`: example js file with a comment showing how to import and use a wasm pkg - `package.json` and `package-lock.json`: - pulls in devDependencies for using webpack: - [`webpack`](https://www.npmjs.com/package/webpack) - [`webpack-cli`](https://www.npmjs.com/package/webpack-cli) - [`webpack-dev-server`](https://www.npmjs.com/package/webpack-dev-server) - defines a `start` script to run `webpack-dev-server` - `webpack.config.js`: configuration file for bundling your js with webpack ## License Licensed under either of * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) at your option. ### Contribution Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions.
tokenizers/tokenizers/examples/unstable_wasm/www/README.md/0
{ "file_path": "tokenizers/tokenizers/examples/unstable_wasm/www/README.md", "repo_id": "tokenizers", "token_count": 893 }
242
#![warn(clippy::all)] #![allow(clippy::upper_case_acronyms)] #![doc(html_favicon_url = "https://huggingface.co/favicon.ico")] #![doc(html_logo_url = "https://huggingface.co/landing/assets/huggingface_logo.svg")] //! The core of `tokenizers`, written in Rust. //! Provides an implementation of today's most used tokenizers, with a focus on performance and //! versatility. //! //! # What is a Tokenizer //! //! A Tokenizer works as a pipeline, it processes some raw text as input and outputs an `Encoding`. //! The various steps of the pipeline are: //! //! 1. The `Normalizer`: in charge of normalizing the text. Common examples of normalization are //! the [unicode normalization standards](https://unicode.org/reports/tr15/#Norm_Forms), such as `NFD` or `NFKC`. //! More details about how to use the `Normalizers` are available on the //! [Hugging Face blog](https://huggingface.co/docs/tokenizers/components#normalizers) //! 2. The `PreTokenizer`: in charge of creating initial words splits in the text. The most common way of //! splitting text is simply on whitespace. //! 3. The `Model`: in charge of doing the actual tokenization. An example of a `Model` would be //! `BPE` or `WordPiece`. //! 4. The `PostProcessor`: in charge of post-processing the `Encoding` to add anything relevant //! that, for example, a language model would need, such as special tokens. //! //! ## Loading a pretrained tokenizer from the Hub //! ``` //! use tokenizers::tokenizer::{Result, Tokenizer}; //! //! fn main() -> Result<()> { //! # #[cfg(feature = "http")] //! # { //! let tokenizer = Tokenizer::from_pretrained("bert-base-cased", None)?; //! //! let encoding = tokenizer.encode("Hey there!", false)?; //! println!("{:?}", encoding.get_tokens()); //! # } //! Ok(()) //! } //! ``` //! //! ## Deserialization and tokenization example //! //! ```no_run //! use tokenizers::tokenizer::{Result, Tokenizer, EncodeInput}; //! use tokenizers::models::bpe::BPE; //! //! fn main() -> Result<()> { //! let bpe_builder = BPE::from_file("./path/to/vocab.json", "./path/to/merges.txt"); //! let bpe = bpe_builder //! .dropout(0.1) //! .unk_token("[UNK]".into()) //! .build()?; //! //! let mut tokenizer = Tokenizer::new(bpe); //! //! let encoding = tokenizer.encode("Hey there!", false)?; //! println!("{:?}", encoding.get_tokens()); //! //! Ok(()) //! } //! ``` //! //! ## Training and serialization example //! //! ```no_run //! use tokenizers::decoders::DecoderWrapper; //! use tokenizers::models::bpe::{BpeTrainerBuilder, BPE}; //! use tokenizers::normalizers::{strip::Strip, unicode::NFC, utils::Sequence, NormalizerWrapper}; //! use tokenizers::pre_tokenizers::byte_level::ByteLevel; //! use tokenizers::pre_tokenizers::PreTokenizerWrapper; //! use tokenizers::processors::PostProcessorWrapper; //! use tokenizers::{AddedToken, Model, Result, TokenizerBuilder}; //! //! use std::path::Path; //! //! fn main() -> Result<()> { //! let vocab_size: usize = 100; //! //! let mut trainer = BpeTrainerBuilder::new() //! .show_progress(true) //! .vocab_size(vocab_size) //! .min_frequency(0) //! .special_tokens(vec![ //! AddedToken::from(String::from("<s>"), true), //! AddedToken::from(String::from("<pad>"), true), //! AddedToken::from(String::from("</s>"), true), //! AddedToken::from(String::from("<unk>"), true), //! AddedToken::from(String::from("<mask>"), true), //! ]) //! .build(); //! //! let mut tokenizer = TokenizerBuilder::new() //! .with_model(BPE::default()) //! .with_normalizer(Some(Sequence::new(vec![ //! Strip::new(true, true).into(), //! NFC.into(), //! 
]))) //! .with_pre_tokenizer(Some(ByteLevel::default())) //! .with_post_processor(Some(ByteLevel::default())) //! .with_decoder(Some(ByteLevel::default())) //! .build()?; //! //! let pretty = false; //! tokenizer //! .train_from_files( //! &mut trainer, //! vec!["path/to/vocab.txt".to_string()], //! )? //! .save("tokenizer.json", pretty)?; //! //! Ok(()) //! } //! ``` //! //! # Additional information //! //! - tokenizers is designed to leverage CPU parallelism when possible. The level of parallelism is determined //! by the total number of core/threads your CPU provides but this can be tuned by setting the `RAYON_RS_NUM_THREADS` //! environment variable. As an example setting `RAYON_RS_NUM_THREADS=4` will allocate a maximum of 4 threads. //! **_Please note this behavior may evolve in the future_** //! //! # Features //! **progressbar**: The progress bar visualization is enabled by default. It might be disabled if //! compilation for certain targets is not supported by the [termios](https://crates.io/crates/termios) //! dependency of the [indicatif](https://crates.io/crates/indicatif) progress bar. #[macro_use] extern crate log; #[macro_use] extern crate lazy_static; #[macro_use] extern crate derive_builder; #[macro_use] pub mod utils; pub mod decoders; pub mod models; pub mod normalizers; pub mod pre_tokenizers; pub mod processors; pub mod tokenizer; // Re-export from tokenizer pub use tokenizer::*; // Re-export also parallelism utils pub use utils::parallelism; // Re-export for from_pretrained #[cfg(feature = "http")] pub use utils::from_pretrained::FromPretrainedParameters;
tokenizers/tokenizers/src/lib.rs/0
{ "file_path": "tokenizers/tokenizers/src/lib.rs", "repo_id": "tokenizers", "token_count": 2175 }
243
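The crate documentation above loads a pretrained tokenizer from the Hub in Rust; a comparable call through the Python bindings (assuming network access to the Hub) is:

```python
# Comparable usage through the Python bindings (requires network access to the Hub).
from tokenizers import Tokenizer

tokenizer = Tokenizer.from_pretrained("bert-base-cased")
encoding = tokenizer.encode("Hey there!", add_special_tokens=False)
print(encoding.tokens)
```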
//! [WordPiece](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/37842.pdf) //! model. use crate::models::bpe::BPE; use crate::tokenizer::{Model, Result, Token}; use std::{ borrow::Cow, collections::HashMap, fs::File, io::prelude::*, io::{BufRead, BufReader}, path::{Path, PathBuf}, }; mod serialization; mod trainer; pub use trainer::*; #[derive(thiserror::Error, Debug)] pub enum Error { #[error("WordPiece error: Missing [UNK] token from the vocabulary")] MissingUnkToken, } type Vocab = HashMap<String, u32>; type VocabR = HashMap<u32, String>; struct Config { files: Option<String>, vocab: Vocab, unk_token: String, continuing_subword_prefix: String, max_input_chars_per_word: usize, } /// A `WordPieceBuilder` can be used to create a `WordPiece` model with a custom configuration. pub struct WordPieceBuilder { config: Config, } impl Default for WordPieceBuilder { fn default() -> Self { Self { config: Config { files: None, vocab: HashMap::new(), unk_token: String::from("[UNK]"), continuing_subword_prefix: String::from("##"), max_input_chars_per_word: 100, }, } } } impl WordPieceBuilder { /// Construct a new `WordPieceBuilder`. pub fn new() -> Self { Self::default() } /// Set the input files. #[must_use] pub fn files(mut self, vocab: String) -> Self { self.config.files = Some(vocab); self } /// Set the vocab (token -> ID) mapping. #[must_use] pub fn vocab(mut self, vocab: Vocab) -> Self { self.config.vocab = vocab; self } /// The the `UNK` token for the vocab. #[must_use] pub fn unk_token(mut self, unk_token: String) -> Self { self.config.unk_token = unk_token; self } /// Set the prefix for continuing subwords. #[must_use] pub fn continuing_subword_prefix(mut self, continuing_subword_prefix: String) -> Self { self.config.continuing_subword_prefix = continuing_subword_prefix; self } /// Set the maximum number of input characters per word. #[must_use] pub fn max_input_chars_per_word(mut self, max_input_chars_per_word: usize) -> Self { self.config.max_input_chars_per_word = max_input_chars_per_word; self } /// Contructs a `WordPiece` model that uses the `WordPieceBuilder`'s configuration. pub fn build(mut self) -> Result<WordPiece> { if let Some(vocab) = self.config.files { self.config.vocab = WordPiece::read_file(&vocab)?; } let vocab_r = self .config .vocab .iter() .map(|(key, val)| (*val, key.to_owned())) .collect(); Ok(WordPiece { vocab: self.config.vocab, vocab_r, unk_token: self.config.unk_token, continuing_subword_prefix: self.config.continuing_subword_prefix, max_input_chars_per_word: self.config.max_input_chars_per_word, }) } } /// A /// [WordPiece](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/37842.pdf) /// model. #[derive(Clone, PartialEq, Eq)] pub struct WordPiece { vocab: Vocab, vocab_r: VocabR, pub unk_token: String, pub continuing_subword_prefix: String, pub max_input_chars_per_word: usize, } impl std::fmt::Debug for WordPiece { fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { fmt.debug_struct("WordPiece") .field("unk_token", &self.unk_token) .field("continuing_subword_prefix", &self.continuing_subword_prefix) .field("max_input_chars_per_word", &self.max_input_chars_per_word) .field("vocab", &self.vocab.len()) .finish() } } impl Default for WordPiece { fn default() -> Self { Self { vocab: HashMap::new(), vocab_r: HashMap::new(), unk_token: String::from("[UNK]"), continuing_subword_prefix: String::from("##"), max_input_chars_per_word: 100, } } } impl WordPiece { /// Get a `WordPieceBuilder`. 
pub fn builder() -> WordPieceBuilder { WordPieceBuilder::new() } /// Read the given files to extract the vocab pub fn read_file(vocab: &str) -> Result<Vocab> { let file = File::open(vocab)?; let file = BufReader::new(file); let mut vocab = HashMap::new(); for (index, line) in file.lines().enumerate() { let line = line?; vocab.insert(line.trim_end().to_owned(), index as u32); } Ok(vocab) } /// Initialize a `WordPiece` model from a vocab mapping file. pub fn from_file(vocab: &str) -> WordPieceBuilder { WordPiece::builder().files(vocab.to_owned()) } /// Create a `WordPiece` model from a `BPE` model. pub fn from_bpe(bpe: &BPE) -> Self { let mut wp = Self::builder().vocab(bpe.get_vocab()).build().unwrap(); if let Some(unk) = bpe.get_unk_token() { wp.unk_token = unk.to_owned(); } if let Some(prefix) = bpe.get_continuing_subword_prefix() { wp.continuing_subword_prefix = prefix.to_owned(); } wp } } impl Model for WordPiece { type Trainer = WordPieceTrainer; fn get_vocab(&self) -> HashMap<String, u32> { self.vocab.clone() } fn get_vocab_size(&self) -> usize { self.vocab.len() } fn tokenize(&self, sequence: &str) -> Result<Vec<Token>> { let char_len = sequence.chars().count(); if char_len > self.max_input_chars_per_word { return Ok(vec![Token { value: self.unk_token.clone(), id: *self .vocab .get(&self.unk_token) .ok_or(Error::MissingUnkToken)?, offsets: (0, sequence.len()), }]); } let mut is_bad = false; let mut start = 0; let mut sub_tokens: Vec<Token> = vec![]; while start < sequence.len() { let mut end = sequence.len(); let mut cur_str = None; while start < end { let mut substr: Cow<str> = Cow::Borrowed(&sequence[start..end]); if start > 0 { substr = Cow::Owned(format!("{}{}", self.continuing_subword_prefix, substr)); } if self.vocab.contains_key(substr.as_ref()) { cur_str = Some(Token { id: self.vocab[substr.as_ref()], value: substr.to_string(), offsets: (start, end), }); break; } end -= substr.chars().last().map_or(1, |c| c.len_utf8()); } if cur_str.is_none() { is_bad = true; break; } sub_tokens.push(cur_str.unwrap()); start = end; } if is_bad { Ok(vec![Token { value: self.unk_token.clone(), id: *self .vocab .get(&self.unk_token) .ok_or(Error::MissingUnkToken)?, offsets: (0, sequence.len()), }]) } else { Ok(sub_tokens) } } fn token_to_id(&self, token: &str) -> Option<u32> { self.vocab.get(token).copied() } fn id_to_token(&self, id: u32) -> Option<String> { self.vocab_r.get(&id).cloned() } fn save(&self, folder: &Path, name: Option<&str>) -> Result<Vec<PathBuf>> { let vocab_file_name = match name { Some(name) => format!("{}-vocab.txt", name), None => "vocab.txt".to_string(), }; // Write vocab.txt let vocab_path: PathBuf = [folder, Path::new(vocab_file_name.as_str())] .iter() .collect(); let mut vocab_file = File::create(&vocab_path)?; let mut vocab: Vec<(&String, &u32)> = self.vocab.iter().collect(); vocab.sort_unstable_by_key(|k| *k.1); vocab_file.write_all( &vocab .into_iter() .flat_map(|(token, _)| format!("{}\n", token).as_bytes().to_owned()) .collect::<Vec<_>>()[..], )?; Ok(vec![vocab_path]) } fn get_trainer(&self) -> Self::Trainer { WordPieceTrainer::builder().build() } } #[cfg(test)] mod tests { use super::*; #[test] fn test_error_display() { assert!(format!("{}", Error::MissingUnkToken).contains("Missing [UNK] token")); } }
tokenizers/tokenizers/src/models/wordpiece/mod.rs/0
{ "file_path": "tokenizers/tokenizers/src/models/wordpiece/mod.rs", "repo_id": "tokenizers", "token_count": 4422 }
244
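`WordPiece::tokenize` above is a greedy longest-match-first search with a `##` continuation prefix. A standalone Python sketch of that strategy (toy vocabulary, offsets omitted):

```python
# Simplified Python sketch of the greedy longest-match-first loop in `tokenize` above.
def wordpiece_tokenize(word, vocab, unk="[UNK]", prefix="##", max_chars=100):
    if len(word) > max_chars:
        return [unk]
    tokens, start = [], 0
    while start < len(word):
        end, match = len(word), None
        while start < end:
            piece = word[start:end] if start == 0 else prefix + word[start:end]
            if piece in vocab:
                match = piece
                break
            end -= 1
        if match is None:
            return [unk]          # any unmatched span maps the whole word to [UNK]
        tokens.append(match)
        start = end
    return tokens


print(wordpiece_tokenize("unwanted", {"un", "##want", "##ed"}))
# ['un', '##want', '##ed']
```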
pub mod bert;
pub mod byte_level;
pub mod delimiter;
pub mod digits;
pub mod metaspace;
pub mod punctuation;
pub mod sequence;
pub mod split;
pub mod unicode_scripts;
pub mod whitespace;

use serde::{Deserialize, Serialize};

use crate::pre_tokenizers::bert::BertPreTokenizer;
use crate::pre_tokenizers::byte_level::ByteLevel;
use crate::pre_tokenizers::delimiter::CharDelimiterSplit;
use crate::pre_tokenizers::digits::Digits;
use crate::pre_tokenizers::metaspace::Metaspace;
use crate::pre_tokenizers::punctuation::Punctuation;
use crate::pre_tokenizers::sequence::Sequence;
use crate::pre_tokenizers::split::Split;
use crate::pre_tokenizers::unicode_scripts::UnicodeScripts;
use crate::pre_tokenizers::whitespace::{Whitespace, WhitespaceSplit};
use crate::{PreTokenizedString, PreTokenizer};

#[derive(Deserialize, Serialize, Clone, Debug, PartialEq)]
#[serde(untagged)]
pub enum PreTokenizerWrapper {
    BertPreTokenizer(BertPreTokenizer),
    ByteLevel(ByteLevel),
    Delimiter(CharDelimiterSplit),
    Metaspace(Metaspace),
    Whitespace(Whitespace),
    Sequence(Sequence),
    Split(Split),
    Punctuation(Punctuation),
    WhitespaceSplit(WhitespaceSplit),
    Digits(Digits),
    UnicodeScripts(UnicodeScripts),
}

impl PreTokenizer for PreTokenizerWrapper {
    fn pre_tokenize(&self, normalized: &mut PreTokenizedString) -> crate::Result<()> {
        match self {
            Self::BertPreTokenizer(bpt) => bpt.pre_tokenize(normalized),
            Self::ByteLevel(bpt) => bpt.pre_tokenize(normalized),
            Self::Delimiter(dpt) => dpt.pre_tokenize(normalized),
            Self::Metaspace(mspt) => mspt.pre_tokenize(normalized),
            Self::Whitespace(wspt) => wspt.pre_tokenize(normalized),
            Self::Punctuation(tok) => tok.pre_tokenize(normalized),
            Self::Sequence(tok) => tok.pre_tokenize(normalized),
            Self::Split(tok) => tok.pre_tokenize(normalized),
            Self::WhitespaceSplit(wspt) => wspt.pre_tokenize(normalized),
            Self::Digits(wspt) => wspt.pre_tokenize(normalized),
            Self::UnicodeScripts(us) => us.pre_tokenize(normalized),
        }
    }
}

impl_enum_from!(BertPreTokenizer, PreTokenizerWrapper, BertPreTokenizer);
impl_enum_from!(ByteLevel, PreTokenizerWrapper, ByteLevel);
impl_enum_from!(CharDelimiterSplit, PreTokenizerWrapper, Delimiter);
impl_enum_from!(Whitespace, PreTokenizerWrapper, Whitespace);
impl_enum_from!(Punctuation, PreTokenizerWrapper, Punctuation);
impl_enum_from!(Sequence, PreTokenizerWrapper, Sequence);
impl_enum_from!(Split, PreTokenizerWrapper, Split);
impl_enum_from!(Metaspace, PreTokenizerWrapper, Metaspace);
impl_enum_from!(WhitespaceSplit, PreTokenizerWrapper, WhitespaceSplit);
impl_enum_from!(Digits, PreTokenizerWrapper, Digits);
impl_enum_from!(UnicodeScripts, PreTokenizerWrapper, UnicodeScripts);

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_deserialize() {
        let pre_tokenizer: PreTokenizerWrapper = serde_json::from_str(r#"{"type":"Sequence","pretokenizers":[{"type":"WhitespaceSplit"},{"type":"Metaspace","replacement":"▁","str_rep":"▁","add_prefix_space":true}]}"#).unwrap();
        assert_eq!(
            pre_tokenizer,
            PreTokenizerWrapper::Sequence(Sequence::new(vec![
                PreTokenizerWrapper::WhitespaceSplit(WhitespaceSplit {}),
                PreTokenizerWrapper::Metaspace(Metaspace::new('▁', true))
            ]))
        );

        let pre_tokenizer: PreTokenizerWrapper = serde_json::from_str(
            r#"{"type":"Metaspace","replacement":"▁","add_prefix_space":true}"#,
        )
        .unwrap();
        assert_eq!(
            pre_tokenizer,
            PreTokenizerWrapper::Metaspace(Metaspace::new('▁', true))
        );

        let pre_tokenizer: PreTokenizerWrapper =
            serde_json::from_str(r#"{"type":"Sequence","pretokenizers":[{"type":"WhitespaceSplit"},{"type":"Metaspace","replacement":"▁","add_prefix_space":true}]}"#).unwrap();
        assert_eq!(
            pre_tokenizer,
            PreTokenizerWrapper::Sequence(Sequence::new(vec![
                PreTokenizerWrapper::WhitespaceSplit(WhitespaceSplit {}),
                PreTokenizerWrapper::Metaspace(Metaspace::new('▁', true))
            ]))
        );

        let pre_tokenizer: PreTokenizerWrapper = serde_json::from_str(
            r#"{"type":"Metaspace","replacement":"▁","add_prefix_space":true, "prepend_scheme":"first"}"#,
        )
        .unwrap();
        assert_eq!(
            pre_tokenizer,
            PreTokenizerWrapper::Metaspace(Metaspace::new_with_prepend_scheme(
                '▁',
                true,
                metaspace::PrependScheme::First
            ))
        );

        let pre_tokenizer: PreTokenizerWrapper = serde_json::from_str(
            r#"{"type":"Metaspace","replacement":"▁","add_prefix_space":true, "prepend_scheme":"always"}"#,
        )
        .unwrap();
        assert_eq!(
            pre_tokenizer,
            PreTokenizerWrapper::Metaspace(Metaspace::new_with_prepend_scheme(
                '▁',
                true,
                metaspace::PrependScheme::Always
            ))
        );
    }

    #[test]
    fn test_deserialize_whitespace_split() {
        let pre_tokenizer: PreTokenizerWrapper =
            serde_json::from_str(r#"{"type":"WhitespaceSplit"}"#).unwrap();
        assert_eq!(
            pre_tokenizer,
            PreTokenizerWrapper::WhitespaceSplit(WhitespaceSplit {})
        );
    }
}
tokenizers/tokenizers/src/pre_tokenizers/mod.rs/0
{ "file_path": "tokenizers/tokenizers/src/pre_tokenizers/mod.rs", "repo_id": "tokenizers", "token_count": 2430 }
245
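The pre_tokenizers/mod.rs record above defines the untagged PreTokenizerWrapper enum that serde uses to round-trip any pre-tokenizer stored in a tokenizer.json file. The following is a minimal sketch of that flow from outside the crate, reusing the {"type":"WhitespaceSplit"} payload from the tests above; the re-export paths (tokenizers::PreTokenizerWrapper, PreTokenizedString, etc.) are taken from the documentation.rs record further down, and serde_json is assumed to be available as a dependency.

use tokenizers::{
    OffsetReferential, OffsetType, PreTokenizedString, PreTokenizer, PreTokenizerWrapper,
};

fn main() -> tokenizers::Result<()> {
    // The untagged enum picks its variant from the JSON shape, as exercised by
    // `test_deserialize_whitespace_split` above.
    let pre_tokenizer: PreTokenizerWrapper = serde_json::from_str(r#"{"type":"WhitespaceSplit"}"#)?;

    // Any wrapped variant is then driven through the common `PreTokenizer` trait.
    let mut pre_tokenized = PreTokenizedString::from("Hello there, y'all!");
    pre_tokenizer.pre_tokenize(&mut pre_tokenized)?;

    // Print the whitespace splits with their byte offsets in the original string.
    println!(
        "{:?}",
        pre_tokenized.get_splits(OffsetReferential::Original, OffsetType::Byte)
    );
    Ok(())
}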
use crate::pattern::Pattern; use crate::{Offsets, Result}; use std::ops::{Bound, RangeBounds}; use unicode_normalization_alignments::UnicodeNormalization; use serde::{Deserialize, Serialize}; /// The possible offsets referential #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum OffsetReferential { Original, Normalized, } /// Represents a Range usable by the NormalizedString to index its content. /// A Range can use indices relative to either the `Original` or the `Normalized` string #[derive(Debug, Clone, PartialEq, Eq)] pub enum Range<T: RangeBounds<usize> + Clone> { Original(T), Normalized(T), } #[allow(clippy::len_without_is_empty)] impl<T> Range<T> where T: RangeBounds<usize> + Clone, { /// Unwrap the underlying range pub fn unwrap(self) -> T { match self { Self::Original(r) => r, Self::Normalized(r) => r, } } /// Return the length of the current Range if not Unbounded pub fn len(&self) -> Option<usize> { let range = self.clone().unwrap(); let end = match range.end_bound() { Bound::Unbounded => None, Bound::Included(i) => Some(*i + 1), Bound::Excluded(i) => Some(*i), }?; match range.start_bound() { Bound::Unbounded => Some(end), Bound::Included(i) => Some(end - (*i + 1)), Bound::Excluded(i) => Some(end - *i), } } /// Converts the current Range to a `std::ops::Range<usize>`. This requires the `max_len` /// of the represented string (in chars, not bytes) in order to cover the case where the /// original provided range was unbounded pub fn into_full_range(self, max_len: usize) -> std::ops::Range<usize> { let range = self.unwrap(); let start = match range.start_bound() { Bound::Unbounded => 0, Bound::Included(i) => *i, Bound::Excluded(i) => *i + 1, }; let end = match range.end_bound() { Bound::Unbounded => max_len, Bound::Included(i) => *i + 1, Bound::Excluded(i) => *i, }; start..end } } /// Defines the expected behavior for the delimiter of a Split Pattern /// When splitting on `'-'` for example, with input `the-final--countdown`: /// - Removed => `[ "the", "final", "countdown" ]` /// - Isolated => `[ "the", "-", "final", "-", "-", "countdown" ]` /// - MergedWithPrevious => `[ "the-", "final-", "-", "countdown" ]` /// - MergedWithNext => `[ "the", "-final", "-", "-countdown" ]` /// - Contiguous => `[ "the", "-", "final", "--", "countdown" ]` #[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize, Eq)] pub enum SplitDelimiterBehavior { Removed, Isolated, MergedWithPrevious, MergedWithNext, Contiguous, } /// A `NormalizedString` takes care of processing an "original" string to modify /// it and obtain a "normalized" string. It keeps both version of the string, /// alignments information between both and provides an interface to retrieve /// ranges of each string, using offsets from any of them. /// /// It is possible to retrieve a part of the original string, by indexing it with /// offsets from the normalized one, and the other way around too. It is also /// possible to convert offsets from one referential to the other one easily. #[derive(Default, Debug, Clone, PartialEq, Eq)] pub struct NormalizedString { /// The original version of the string, before any modification original: String, /// The normalized version of the string, after all modifications normalized: String, /// Mapping from normalized string to original one: (start, end) for each /// byte of the normalized string alignments: Vec<(usize, usize)>, /// If this NormalizedString is a slice of a bigger one, we keep the track /// of the missing part, so that we can still give offsets from this original /// string. 
original_shift: usize, } impl NormalizedString { #[cfg(test)] pub(crate) fn new( original: String, normalized: String, alignments: Vec<(usize, usize)>, original_shift: usize, ) -> Self { Self { original, normalized, alignments, original_shift, } } /// Return the normalized string pub fn get(&self) -> &str { &self.normalized } /// Return the original string pub fn get_original(&self) -> &str { &self.original } /// Return the original offsets pub fn offsets_original(&self) -> Offsets { ( self.original_shift, self.original_shift + self.len_original(), ) } /// Convert the given offsets range from one referential to the other one: /// `Original => Normalized` or `Normalized => Original` /// /// Returns `None` when targeting something that is outside range pub fn convert_offsets<T>(&self, range: Range<T>) -> Option<std::ops::Range<usize>> where T: RangeBounds<usize> + Clone, { let len_original = self.len_original(); let len_normalized = self.len(); let (target, original) = match range { Range::Original(_) => (range.into_full_range(len_original), true), Range::Normalized(_) => (range.into_full_range(len_normalized), false), }; // If we target an empty range, let's return the same if target.start == target.end { return Some(target); } // If the target goes reverse, return None if target.start > target.end { return None; } // If we target 0..0 on an empty string, we want to expand to the entire equivalent if original && self.original.is_empty() && target == (0..0) { return Some(0..len_normalized); } if !original && self.normalized.is_empty() && target == (0..0) { return Some(0..len_original); } if original { let (mut start, mut end) = (None, None); self.alignments .iter() .enumerate() .take_while(|(_, alignment)| target.end >= alignment.1) .for_each(|(i, alignment)| { if start.is_none() && target.start <= alignment.0 { // For now, don't update if width == 0 if alignment.0 != alignment.1 { start = Some(i); } } if target.end >= alignment.1 { end = Some(i + 1); } }); match (start, end) { // Targetting inexistant beginning (Some(s), None) => Some(s..s), // Targetting inexistant end (None, Some(e)) => Some(e..e), // Found the range (Some(s), Some(e)) => Some(s..e), _ => None, } } else { self.alignments.get(target).and_then(expand_alignments) } } /// Return a range of the normalized string pub fn get_range<T>(&self, range: Range<T>) -> Option<&str> where T: RangeBounds<usize> + Clone, { match range { Range::Original(_) => self.normalized.get(self.convert_offsets(range)?), Range::Normalized(_) => self.normalized.get(range.into_full_range(self.len())), } } /// Return a range of the original string pub fn get_range_original<T>(&self, range: Range<T>) -> Option<&str> where T: RangeBounds<usize> + Clone, { match range { Range::Original(_) => self .original .get(range.into_full_range(self.len_original())), Range::Normalized(_) => self.original.get(self.convert_offsets(range)?), } } /// Validate the given range, to make sure it is on char boundaries fn validate_range<T: RangeBounds<usize> + Clone>( &self, range: Range<T>, ) -> Option<Range<std::ops::Range<usize>>> { match range { Range::Original(_) => { let r = range.into_full_range(self.original.len()); if !(self.original.is_char_boundary(r.start) && self.original.is_char_boundary(r.end)) { None } else { Some(Range::Original(r)) } } Range::Normalized(_) => { let r = range.into_full_range(self.normalized.len()); if !(self.normalized.is_char_boundary(r.start) && self.normalized.is_char_boundary(r.end)) { None } else { Some(Range::Normalized(r)) } } } } /// 
Return a slice of the current NormalizedString /// If the range is not on char boundaries, return None pub fn slice<T>(&self, range: Range<T>) -> Option<NormalizedString> where T: RangeBounds<usize> + Clone, { let full_range = self.validate_range(range)?; let (normalized_range, original_range) = match full_range { Range::Original(_) => ( self.convert_offsets(full_range.clone())?, full_range.clone().unwrap(), ), Range::Normalized(_) => ( full_range.clone().unwrap(), self.convert_offsets(full_range.clone())?, ), }; let n_shift = original_range.start; Some(Self { original: self .get_range_original(full_range.clone()) .unwrap_or_default() .into(), normalized: self.get_range(full_range).unwrap_or_default().into(), alignments: self .alignments .get(normalized_range)? .to_vec() .iter() .map(|(start, end)| (start - n_shift, end - n_shift)) .collect(), original_shift: self.original_shift + original_range.start, }) } /// Applies transformations to the current normalized version of the string, /// while updating the alignments. /// This method expect an Iterator yielding each char of the new normalized string /// with a `change` isize equals to: /// - `1` if this is a new char /// - `-N` if the char is right before N removed chars /// - `0` if the char is replacing the existing one /// Since it is possible that the normalized string doesn't include some of the characters at /// the beginning of the original one, we need an `initial_offset` which represents the number /// of removed chars at the very beginning. pub fn transform_range<T, I>(&mut self, range: Range<T>, dest: I, initial_offset: usize) where T: RangeBounds<usize> + Clone, I: IntoIterator<Item = (char, isize)>, { let n_range = match range { Range::Normalized(_) => range.into_full_range(self.len()), Range::Original(_) => match self.convert_offsets(range) { Some(range) => range, None => return, }, }; trace!( "===== transform_range call with {:?} (initial_offset: {}) =====", n_range, initial_offset ); // Retrieve the original characters that are being replaced. This let us // compute the change in byte sizes along the way. 
let mut replaced_normalized = self.normalized[n_range.clone()] .chars() .collect::<Vec<_>>() .into_iter(); let initial_removed: usize = (&mut replaced_normalized) .take(initial_offset) .map(|c| c.len_utf8()) .sum(); let mut offset = (initial_removed + n_range.start) as isize; let mut alignments = Vec::with_capacity(n_range.len()); trace!("=> Applying transformations"); let normalized = dest .into_iter() .map(|(c, changes)| { trace!( "### {:?} with size {}: {} with offset {} ###", c, c.len_utf8(), match changes { 0 => "Replacing".into(), ch if ch > 0 => "Adding".into(), ch if ch < 0 => format!("Replacing + removing {} following chars", ch), _ => "Undefined".into(), }, offset ); let idx = offset as usize; let align = if changes.is_positive() { if idx < 1 { (0, 0) } else { // This is a newly inserted character, so it shares the same alignment // than the previous one self.alignments[idx - 1] } } else { self.alignments[idx] }; // If we are replacing a character, find it and compute the change in size let replaced_char = if !changes.is_positive() { replaced_normalized.next() } else { None }; let replaced_char_size = replaced_char.map_or(0, |c| c.len_utf8()); let replaced_char_size_change = c.len_utf8() as isize - replaced_char_size as isize; if let Some(ref replaced_char) = replaced_char { trace!( "Replacing char {:?} - with a change in size: {}", replaced_char, replaced_char_size_change ); } // If we are removing some characters, find them too let total_bytes_to_remove = if changes.is_negative() { (&mut replaced_normalized) .take(-changes as usize) .map(|c| c.len_utf8()) .sum() } else { 0 }; trace!("Total bytes to remove: {}", total_bytes_to_remove); // Keep track of the changes for next offsets offset += replaced_char_size as isize; offset += total_bytes_to_remove as isize; trace!("New offset: {}", offset); trace!("New normalized alignment: {}x {:?}", c.len_utf8(), align); alignments.extend((0..c.len_utf8()).map(|_| align)); // Then we keep only the char for string reconstruction c }) .collect::<String>(); self.alignments.splice(n_range.clone(), alignments); unsafe { self.normalized .as_mut_vec() .splice(n_range, normalized.bytes()); } } /// Applies transformations to the current normalized version of the string, /// while updating the alignments. /// This method expect an Iterator yielding each char of the new normalized string /// with a `change` isize equals to: /// - `1` if this is a new char /// - `-N` if the char is right before N removed chars /// - `0` if the char is replacing the existing one /// Since it is possible that the normalized string doesn't include some of the characters at /// the beginning of the original one, we need an `initial_offset` which represents the number /// of removed chars at the very beginning. 
pub fn transform<I>(&mut self, dest: I, initial_offset: usize) where I: IntoIterator<Item = (char, isize)>, { self.transform_range(Range::Original(..), dest, initial_offset) } /// Applies NFD normalization pub fn nfd(&mut self) -> &mut Self { self.transform(self.get().to_owned().nfd(), 0); self } /// Applies NFKD normalization pub fn nfkd(&mut self) -> &mut Self { self.transform(self.get().to_owned().nfkd(), 0); self } /// Applies NFC normalization pub fn nfc(&mut self) -> &mut Self { self.transform(self.get().to_owned().nfc(), 0); self } /// Applies NFKC normalization pub fn nfkc(&mut self) -> &mut Self { self.transform(self.get().to_owned().nfkc(), 0); self } /// Applies filtering over our characters pub fn filter<F: Fn(char) -> bool>(&mut self, keep: F) -> &mut Self { let mut removed: isize = 0; let mut removed_start: usize = 0; let mut transforms = Vec::with_capacity(self.normalized.len()); let mut last_c = None; for c in self.normalized.chars() { if keep(c) { match last_c { Some(lc) => { transforms.push((lc, -removed)); } None => { removed_start = removed as usize; } } last_c = Some(c); removed = 0; } else { removed += 1; } } if let Some(lc) = last_c { transforms.push((lc, -removed)); } self.transform(transforms, removed_start); self } /// Prepend the given string to ourself pub fn prepend(&mut self, s: &str) -> &mut Self { if let Some(next) = self.normalized.chars().next() { let transformations = s .chars() .enumerate() .map(|(i, c)| (c, isize::from(i != 0))) .chain(std::iter::once((next, 1))); self.transform_range(Range::Normalized(0..next.len_utf8()), transformations, 0); } self } /// Append the given string to ourself pub fn append(&mut self, s: &str) -> &mut Self { if let Some((b, prev)) = self.normalized.char_indices().last() { let transformations = std::iter::once((prev, 0)).chain(s.chars().map(|c| (c, 1))); self.transform_range(Range::Normalized(b..), transformations, 0); } self } /// Map our characters pub fn map<F: Fn(char) -> char>(&mut self, map: F) -> &mut Self { let transformations = self .normalized .chars() .map(|c| (map(c), 0)) .collect::<Vec<_>>(); self.transform(transformations, 0); self } /// Calls the given function for each characters pub fn for_each<F: FnMut(char)>(&self, foreach: F) -> &Self { self.normalized.chars().for_each(foreach); self } /// Lowercase pub fn lowercase(&mut self) -> &mut Self { let mut new_chars: Vec<(char, isize)> = vec![]; self.for_each(|c| { c.to_lowercase().enumerate().for_each(|(index, c)| { new_chars.push((c, isize::from(index > 0))); }) }); self.transform(new_chars, 0); self } /// Uppercase pub fn uppercase(&mut self) -> &mut Self { let mut new_chars: Vec<(char, isize)> = vec![]; self.for_each(|c| { c.to_uppercase().enumerate().for_each(|(index, c)| { new_chars.push((c, isize::from(index > 0))); }) }); self.transform(new_chars, 0); self } /// Replace anything that matches the pattern with the given content. pub fn replace<P: Pattern>(&mut self, pattern: P, content: &str) -> Result<()> { let mut new_normalized = String::with_capacity(self.normalized.len()); // Initially allocate for the input size let mut new_alignments: Vec<(usize, usize)> = Vec::with_capacity(self.alignments.len()); let mut last_end = 0; // Keep track of the last end position pattern .find_matches(&self.normalized)? 
.into_iter() .for_each(|((start, end), is_match)| { if is_match { let range = start..end; let mut new_len = 0; let removed_chars = self.normalized[range.clone()].chars().count(); /* The following code is equivalent to this call, but computationally much more efficient self.transform_range( Range::Normalized(range), content.chars().map(|c| { new_len += c.len_utf8(); (c, 1) }), removed_chars, ); */ // Copy the part of the string that is before the match new_normalized.push_str(&self.normalized[last_end..start]); new_alignments.extend(self.alignments[last_end..start].iter().cloned()); let n_range = Range::Normalized(range).into_full_range(self.len()); // Retrieve the original characters that are being replaced. This let us // compute the change in byte sizes along the way. let mut replaced_normalized = self.normalized[n_range.clone()] .chars() .collect::<Vec<_>>() .into_iter(); let initial_removed: usize = (&mut replaced_normalized) .take(removed_chars) .map(|c| c.len_utf8()) .sum(); let dest = content.chars().map(|c| { new_len += c.len_utf8(); (c, 1) }); let mut offset = (initial_removed + n_range.start) as isize; let normalized = dest .into_iter() .map(|(c, changes): (char, i32)| { let idx = offset as usize; let align = if changes.is_positive() { if idx < 1 { (0, 0) } else { // This is a newly inserted character, so it shares the same alignment // than the previous one self.alignments[idx - 1] } } else { self.alignments[idx] }; // If we are replacing a character, find it and compute the change in size let replaced_char = if !changes.is_positive() { replaced_normalized.next() } else { None }; let replaced_char_size = replaced_char.map_or(0, |c| c.len_utf8()); // If we are removing some characters, find them too let total_bytes_to_remove = if changes.is_negative() { (&mut replaced_normalized) .take(-changes as usize) .map(|c| c.len_utf8()) .sum() } else { 0 }; // Keep track of the changes for next offsets offset += replaced_char_size as isize; offset += total_bytes_to_remove as isize; new_alignments.extend((0..c.len_utf8()).map(|_| align)); // Then we keep only the char for string reconstruction c }) .collect::<String>(); new_normalized.push_str(&normalized); last_end = end; } }); // Copy the remaining part of the input new_normalized.push_str(&self.normalized[last_end..]); new_alignments.extend(&self.alignments[last_end..]); self.normalized = new_normalized; self.alignments = new_alignments; Ok(()) } /// Clear the normalized part of the string pub fn clear(&mut self) -> usize { let len = self.len(); self.transform(std::iter::empty(), len); len } /// Split the current string in many subparts. Specify what to do with the /// delimiter. 
/// /// ## Splitting Behavior for the delimiter /// /// The behavior can be one of the followings: /// When splitting on `'-'` for example, with input `the-final--countdown`: /// - Removed => `[ "the", "", "final", "", "", "countdown" ]` /// - Isolated => `[ "the", "-", "final", "-", "-", "countdown" ]` /// - MergedWithPrevious => `[ "the-", "final-", "-", "countdown" ]` /// - MergedWithNext => `[ "the", "-final", "-", "-countdown" ]` pub fn split<P: Pattern>( &self, pattern: P, behavior: SplitDelimiterBehavior, ) -> Result<Vec<NormalizedString>> { let matches = pattern.find_matches(&self.normalized)?; // Process the matches according to the selected behavior: Vec<(Offsets, should_remove)> use SplitDelimiterBehavior::*; let splits = match behavior { Isolated => matches .into_iter() .map(|(offsets, _)| (offsets, false)) .collect(), Removed => matches, Contiguous => { let mut previous_match = false; matches .into_iter() .fold(vec![], |mut acc, (offsets, is_match)| { if is_match == previous_match { if let Some(((_, end), _)) = acc.last_mut() { *end = offsets.1; } else { acc.push((offsets, false)); } } else { acc.push((offsets, false)); } previous_match = is_match; acc }) } MergedWithPrevious => { let mut previous_match = false; matches .into_iter() .fold(vec![], |mut acc, (offsets, is_match)| { if is_match && !previous_match { if let Some(((_, end), _)) = acc.last_mut() { *end = offsets.1; } else { acc.push((offsets, false)); } } else { acc.push((offsets, false)); } previous_match = is_match; acc }) } MergedWithNext => { let mut previous_match = false; let mut matches = matches .into_iter() .rev() .fold(vec![], |mut acc, (offsets, is_match)| { if is_match && !previous_match { if let Some(((start, _), _)) = acc.last_mut() { *start = offsets.0; } else { acc.push((offsets, false)); } } else { acc.push((offsets, false)); } previous_match = is_match; acc }); matches.reverse(); matches } }; // Then we split according to the computed splits Ok(splits .into_iter() .filter_map(|(offsets, remove)| { if !remove { Some( self.slice(Range::Normalized(offsets.0..offsets.1)) .expect("NormalizedString bad split"), ) } else { None } }) .collect()) } /// Remove any leading space(s) of the normalized string pub fn lstrip(&mut self) -> &mut Self { self.lrstrip(true, false) } /// Remove any trailing space(s) of the normalized string pub fn rstrip(&mut self) -> &mut Self { self.lrstrip(false, true) } /// Remove any leading and trailing space(s) of the normalized string pub fn strip(&mut self) -> &mut Self { self.lrstrip(true, true) } fn lrstrip(&mut self, left: bool, right: bool) -> &mut Self { let leading_spaces = if left { self.get().chars().take_while(|c| c.is_whitespace()).count() } else { 0 }; let trailing_spaces = if right { self.get() .chars() .rev() .take_while(|c| c.is_whitespace()) .count() } else { 0 }; if leading_spaces > 0 || trailing_spaces > 0 { let count = self.get().chars().count(); let transformation = self .normalized .chars() .enumerate() .filter_map(|(i, c)| { if i < leading_spaces || i >= count - trailing_spaces { None } else if i == self.len() - trailing_spaces - 1 { Some((c, -(trailing_spaces as isize))) } else { Some((c, 0)) } }) .collect::<Vec<_>>(); self.transform(transformation, leading_spaces); } self } /// Returns the length of the normalized string (counting chars not bytes) pub fn len(&self) -> usize { self.normalized.len() } /// Returns the length of the original string (counting chars not bytes) pub fn len_original(&self) -> usize { self.original.len() } /// Whether empty pub fn 
is_empty(&self) -> bool { self.normalized.is_empty() } /// Recalculate original alignments #[allow(dead_code)] pub(crate) fn alignments_original(&self) -> Vec<(usize, usize)> { // Start, end are in alignments // offset, length are in alignments_original let mut alignments_original = Vec::with_capacity(self.original.len()); // Eventual gap before first group let start = self.alignments[0].0; if start != 0 { alignments_original.extend(vec![(0, 0); start]); } let mut last = (&self.alignments[0].0, &self.alignments[0].1); let mut offset = 0; let mut length = 0; for (start, end) in &self.alignments { if last == (start, end) { // This is the same group length += 1; } else { // This is a new group if start < last.1 { panic!("We can't have overlapping ranges."); } // Add the old group alignments_original.extend(vec![(offset, offset + length); last.1 - last.0]); offset += length; length = 1; // Eventual gap between the 2 groups alignments_original.extend(vec![(offset, offset); start - last.1]); } last = (start, end); } // Add the last group alignments_original.extend(vec![(offset, offset + length); last.1 - last.0]); // Add eventual last gap offset += length; alignments_original.extend(vec![ (offset, offset); self.original.len() - alignments_original.len() ]); // assert_eq!(alignments_original.len(), self.original.len()); alignments_original } } /// Returns the range covered by a slice of alignments fn expand_alignments(alignments: &[(usize, usize)]) -> Option<std::ops::Range<usize>> { if alignments.is_empty() { None } else { let start = alignments[0].0; let end = alignments[alignments.len() - 1].1; Some(start..end) } } /// Returns a range of the given string slice, by indexing chars instead of bytes pub fn get_range_of<T: RangeBounds<usize>>(s: &str, range: T) -> Option<&str> { let len = s.chars().count(); let start = match range.start_bound() { Bound::Unbounded => 0, Bound::Included(i) => *i, Bound::Excluded(i) => *i + 1, }; let end = match range.end_bound() { Bound::Unbounded => len, Bound::Included(i) => *i + 1, Bound::Excluded(i) => *i, }; if start == 0 && end == 0 { Some(&s[0..0]) } else if start >= len || end > len || start >= end { None } else { let start_b = s.char_indices().map(|(i, _)| i).nth(start).unwrap_or(0); let end_b = s.char_indices().map(|(i, _)| i).nth(end).unwrap_or(s.len()); Some(&s[start_b..end_b]) } } /// Convert the given range from bytes to char pub fn bytes_to_char(s: &str, range: std::ops::Range<usize>) -> Option<std::ops::Range<usize>> { let (mut start, mut end) = if range == (0..0) { (Some(0), Some(0)) } else { (None, None) }; s.char_indices() .enumerate() .take_while(|(_, (b, _))| *b <= range.end) .filter(|(_, (b, _))| *b >= range.start) .for_each(|(i, (b, c))| { if b == range.start { start = Some(i); } if b == range.end { end = Some(i); } if b + c.len_utf8() == range.end { end = Some(i + 1); } }); Some(start?..end?) } /// Convert the given range from char to bytes pub fn char_to_bytes(s: &str, range: std::ops::Range<usize>) -> Option<std::ops::Range<usize>> { let (mut start, mut end) = if range == (0..0) { (Some(0), Some(0)) } else { (None, None) }; if range.start == range.end { s.char_indices() .skip(range.start) .take(1) .for_each(|(b, _)| { start = Some(b); end = Some(b); }); } else { s.char_indices() .skip(range.start) .take(range.end - range.start) .for_each(|(b, c)| { if start.is_none() { start = Some(b); } end = Some(b + c.len_utf8()); }); } Some(start?..end?) 
} impl From<String> for NormalizedString { fn from(s: String) -> Self { let alignments = s .char_indices() .flat_map(|(b, c)| { let len = c.len_utf8(); (0..len).map(move |_| (b, b + len)) }) .collect::<Vec<_>>(); Self { original: s.clone(), normalized: s, alignments, original_shift: 0, } } } impl From<&str> for NormalizedString { fn from(s: &str) -> Self { Self::from(s.to_owned()) } } #[cfg(test)] mod tests { use super::*; use regex::Regex; use unicode_categories::UnicodeCategories; #[test] fn nfd_adds_new_chars() { let mut n = NormalizedString::from("élégant"); n.nfd(); assert_eq!( &n.alignments, &[ (0, 2), (0, 2), (0, 2), (2, 3), (3, 5), (3, 5), (3, 5), (5, 6), (6, 7), (7, 8), (8, 9) ] ); assert_eq!( n.alignments_original(), vec![ (0, 3), (0, 3), (3, 4), (4, 7), (4, 7), (7, 8), (8, 9), (9, 10), (10, 11) ] ); } #[test] fn remove_chars_added_by_nfd() { let mut n = NormalizedString::from("élégant"); n.nfd().filter(|c| !c.is_mark_nonspacing()); assert_eq!(n.get(), "elegant"); assert_eq!( &n.alignments, &[(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 9)] ); assert_eq!( n.alignments_original(), vec![ (0, 1), (0, 1), (1, 2), (2, 3), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7) ] ); } #[test] fn remove_chars() { let mut n = NormalizedString::from("élégant"); n.filter(|c| c != 'n'); assert_eq!(n.get(), "élégat"); assert_eq!( &n.alignments, &[ (0, 2), (0, 2), (2, 3), (3, 5), (3, 5), (5, 6), (6, 7), // Skipped range (8, 9) ] ); assert_eq!( n.alignments_original(), vec![ (0, 2), (0, 2), (2, 3), (3, 5), (3, 5), (5, 6), (6, 7), (7, 7), // Eaten n (7, 8) ] ); } #[test] fn mixed_addition_and_removal() { let mut n = NormalizedString::from("élégant"); n.nfd().filter(|c| !c.is_mark_nonspacing() && c != 'n'); assert_eq!(n.get(), "elegat"); assert_eq!( &n.alignments, &[(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (8, 9)] ); assert_eq!( n.alignments_original(), vec![ (0, 1), (0, 1), (1, 2), (2, 3), (2, 3), (3, 4), // g (4, 5), // a (5, 5), // Eaten n (5, 6) ] ); } #[test] fn range_conversion() { let mut n = NormalizedString::from(" __Hello__ "); n.filter(|c| !c.is_whitespace()).lowercase(); let hello_n = n.convert_offsets(Range::Original(6..11)); assert_eq!(hello_n, Some(2..7)); assert_eq!( n.get_range(Range::Normalized(hello_n.clone().unwrap())), Some("hello") ); assert_eq!( n.get_range_original(Range::Normalized(hello_n.unwrap())), Some("Hello") ); assert_eq!(n.get_range(Range::Original(6..11)), Some("hello")); assert_eq!(n.get_range_original(Range::Original(6..11)), Some("Hello")); // Make sure we get None only in specific cases assert_eq!(n.convert_offsets(Range::Original(0..0)), Some(0..0)); assert_eq!(n.convert_offsets(Range::Original(3..3)), Some(3..3)); assert_eq!(n.convert_offsets(Range::Original(15..)), Some(9..9)); assert_eq!(n.convert_offsets(Range::Original(16..)), Some(16..16)); assert_eq!(n.convert_offsets(Range::Original(17..)), None); assert_eq!(n.convert_offsets(Range::Normalized(0..0)), Some(0..0)); assert_eq!(n.convert_offsets(Range::Normalized(3..3)), Some(3..3)); assert_eq!(n.convert_offsets(Range::Normalized(9..)), Some(9..9)); assert_eq!(n.convert_offsets(Range::Normalized(10..)), None); } #[test] fn original_range() { let mut n = NormalizedString::from("Hello_______ World!"); n.filter(|c| c != '_').lowercase(); let world_n = n.get_range(Range::Normalized(6..11)).unwrap(); let world_o = n.get_range_original(Range::Normalized(6..11)).unwrap(); assert_eq!(world_n, "world"); assert_eq!(world_o, "World"); let original_range = 
Range::Original(n.convert_offsets(Range::Normalized(6..11)).unwrap()); assert_eq!(n.get_range(original_range.clone()).unwrap(), "world"); assert_eq!( n.get_range_original(original_range.clone()).unwrap(), "World" ); assert_eq!(original_range.into_full_range(n.len_original()), 13..18); } #[test] fn added_around_edges() { let mut n = NormalizedString::from("Hello"); n.transform( vec![ (' ', 1), ('H', 0), ('e', 0), ('l', 0), ('l', 0), ('o', 0), (' ', 1), ], 0, ); assert_eq!(&n.normalized, " Hello "); assert_eq!( n.get_range_original(Range::Normalized(1..n.normalized.len() - 1)), Some("Hello") ); } #[test] fn added_characters_alignment() { let mut n = NormalizedString::from("野口 No"); n.transform( n.get().to_owned().chars().flat_map(|c| { if (c as usize) > 0x4E00 { vec![(' ', 0), (c, 1), (' ', 1)] } else { vec![(c, 0)] } }), 0, ); assert_eq!( n, NormalizedString { original: "野口 No".into(), normalized: " 野 口 No".into(), alignments: vec![ (0, 3), (0, 3), (0, 3), (0, 3), (0, 3), (3, 6), (3, 6), (3, 6), (3, 6), (3, 6), (6, 7), (7, 8), (8, 9) ], original_shift: 0 } ); assert_eq!( n.alignments_original(), vec![ (0, 5), (0, 5), (0, 5), (5, 10), (5, 10), (5, 10), (10, 11), (11, 12), (12, 13) ] ); } #[test] fn remove_at_beginning() { let mut n = NormalizedString::from(" Hello"); n.filter(|c| !c.is_whitespace()); assert_eq!( n.get_range_original(Range::Normalized(1.."Hello".len())), Some("ello") ); assert_eq!( n.get_range_original(Range::Normalized(0..n.normalized.len())), Some("Hello") ); } #[test] fn remove_at_end() { let mut n = NormalizedString::from("Hello "); n.filter(|c| !c.is_whitespace()); assert_eq!(n.get_range_original(Range::Normalized(0..4)), Some("Hell")); assert_eq!( n.get_range_original(Range::Normalized(0..n.normalized.len())), Some("Hello") ); } #[test] fn removed_around_both_edges() { let mut n = NormalizedString::from(" Hello "); n.filter(|c| !c.is_whitespace()); assert_eq!(&n.normalized, "Hello"); assert_eq!( n.get_range_original(Range::Normalized(0.."Hello".len())), Some("Hello") ); assert_eq!( n.get_range_original(Range::Normalized(1.."Hell".len())), Some("ell") ); } #[test] fn lstrip() { let mut n = NormalizedString::from(" This is an example "); n.lstrip(); assert_eq!(&n.normalized, "This is an example "); assert_eq!( n.get_range_original(Range::Normalized(0..n.normalized.len())), Some("This is an example ") ); } #[test] fn rstrip() { let mut n = NormalizedString::from(" This is an example "); n.rstrip(); assert_eq!(&n.normalized, " This is an example"); assert_eq!( n.get_range_original(Range::Normalized(0..n.normalized.len())), Some(" This is an example") ); } #[test] fn strip() { let mut n = NormalizedString::from(" This is an example "); n.strip(); assert_eq!(&n.normalized, "This is an example"); assert_eq!( n.get_range_original(Range::Normalized(0..n.normalized.len())), Some("This is an example") ); } #[test] fn strip_unicode() { let mut n = NormalizedString::from(" 你好asa \n"); n.strip(); assert_eq!(&n.normalized, "你好asa"); assert_eq!( n.get_range_original(Range::Normalized(0..n.normalized.len())), Some("你好asa") ); } #[test] fn prepend() { let mut n = NormalizedString::from("there"); n.prepend("Hey "); assert_eq!(&n.normalized, "Hey there"); assert_eq!( n.alignments, vec![ (0, 1), (0, 1), (0, 1), (0, 1), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5) ] ); assert_eq!(n.convert_offsets(Range::Normalized(0..4)), Some(0..1)); } #[test] fn append() { let mut n = NormalizedString::from("Hey"); n.append(" there"); assert_eq!(&n.normalized, "Hey there"); assert_eq!( n.alignments, vec![ (0, 
1), (1, 2), (2, 3), (2, 3), (2, 3), (2, 3), (2, 3), (2, 3), (2, 3) ] ); assert_eq!( n.convert_offsets(Range::Normalized(3.." there".len())), Some(2..3) ); } #[test] fn get_range() { let s = String::from("Hello my name is John 👋"); assert_eq!(get_range_of(&s, ..), Some(&s[..])); assert_eq!(get_range_of(&s, 17..), Some("John 👋")); } #[test] fn slice() { let mut s = NormalizedString::from("𝔾𝕠𝕠𝕕 𝕞𝕠𝕣𝕟𝕚𝕟𝕘"); s.nfkc(); let original_slice = s.slice(Range::Original(0..4)).unwrap(); assert_eq!(original_slice.get(), "G"); assert_eq!(original_slice.get_original(), "𝔾"); let normalized_slice = s.slice(Range::Normalized(0..4)).unwrap(); assert_eq!(normalized_slice.get(), "Good"); assert_eq!(normalized_slice.get_original(), "𝔾𝕠𝕠𝕕"); // Make sure the sliced NormalizedString is still aligned as expected let mut s = NormalizedString::from(" Good Morning! "); s.strip(); // If we keep the whole slice let slice = s.slice(Range::Original(..)).unwrap(); assert_eq!( slice.get_range_original(Range::Normalized(0..4)), Some("Good") ); let slice = s.slice(Range::Normalized(..)).unwrap(); assert_eq!( slice.get_range_original(Range::Normalized(0..4)), Some("Good") ); // If we keep after the modified piece let slice = s.slice(Range::Original(4..15)).unwrap(); assert_eq!( slice.get_range_original(Range::Normalized(0..3)), Some("ood") ); // If we keep only the modified piece let slice = s.slice(Range::Original(3..16)).unwrap(); assert_eq!( slice.get_range_original(Range::Normalized(0..4)), Some("Good") ); } #[test] fn replace() { // Simple let mut s = NormalizedString::from(" Hello friend "); s.replace(' ', "_").unwrap(); assert_eq!(s.get(), "_Hello___friend_"); let mut s = NormalizedString::from("aaaab"); s.replace('a', "b").unwrap(); assert_eq!(s.get(), "bbbbb"); // Overlapping let mut s = NormalizedString::from("aaaab"); s.replace("aaa", "b").unwrap(); assert_eq!(s.get(), "bab"); // Regex let mut s = NormalizedString::from(" Hello friend "); let re = Regex::new(r"\s+").unwrap(); s.replace(&re, "_").unwrap(); assert_eq!(s.get(), "_Hello_friend_"); } #[test] fn split() { use SplitDelimiterBehavior::*; let s = NormalizedString::from("The-final--countdown"); let test = |behavior: SplitDelimiterBehavior, result: Vec<&str>| { let splits = s.split('-', behavior).unwrap(); assert_eq!(splits.iter().map(|n| n.get()).collect::<Vec<_>>(), result); }; test(Removed, vec!["The", "final", "countdown"]); test(Isolated, vec!["The", "-", "final", "-", "-", "countdown"]); test(MergedWithPrevious, vec!["The-", "final-", "-", "countdown"]); test(MergedWithNext, vec!["The", "-final", "-", "-countdown"]); test(Contiguous, vec!["The", "-", "final", "--", "countdown"]); } #[test] fn transform_range_single_bytes() { let s = NormalizedString::from("Hello friend"); // Removing at the beginning let mut current = s.clone(); current.transform_range(Range::Original(0..4), vec![('Y', 0)], 3); assert_eq!( current, NormalizedString { original: "Hello friend".into(), normalized: "Yo friend".into(), alignments: vec![ (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12) ], original_shift: 0, } ); assert_eq!( current.alignments_original(), vec![ (0, 0), (0, 0), (0, 0), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9) ] ); // Removing in the middle let mut current = s.clone(); current.transform_range( Range::Original(3..10), vec![('_', 0), ('F', 0), ('R', -2)], 2, ); assert_eq!( current, NormalizedString { original: "Hello friend".into(), normalized: "Hel_FRnd".into(), alignments: vec![ (0, 1), (1, 2), (2, 3), (5, 
6), (6, 7), (7, 8), (10, 11), (11, 12) ], original_shift: 0, } ); assert_eq!( current.alignments_original(), vec![ (0, 1), (1, 2), (2, 3), (3, 3), (3, 3), (3, 4), (4, 5), (5, 6), (6, 6), (6, 6), (6, 7), (7, 8) ] ); // Removing at the end let mut current = s.clone(); current.transform_range(Range::Original(5..), vec![('_', 0), ('F', -5)], 0); assert_eq!( current, NormalizedString { original: "Hello friend".into(), normalized: "Hello_F".into(), alignments: vec![(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7)], original_shift: 0, } ); assert_eq!( current.alignments_original(), vec![ (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 7), (7, 7), (7, 7), (7, 7), (7, 7) ] ); // Adding at the beginning let mut current = s.clone(); current.transform_range(Range::Original(0..1), vec![('H', 1), ('H', 0)], 0); assert_eq!( current, NormalizedString { original: "Hello friend".into(), normalized: "HHello friend".into(), alignments: vec![ (0, 0), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12) ], original_shift: 0, } ); assert_eq!( current.alignments_original(), vec![ (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12), (12, 13) ] ); // Equivalent to the previous one let mut current = s.clone(); current.transform_range(Range::Original(0..0), vec![('H', 1)], 0); assert_eq!( current, NormalizedString { original: "Hello friend".into(), normalized: "HHello friend".into(), alignments: vec![ (0, 0), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12) ], original_shift: 0, } ); assert_eq!( current.alignments_original(), vec![ (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12), (12, 13) ] ); // Adding as part of the first character let mut current = s.clone(); current.transform_range(Range::Original(0..1), vec![('H', 0), ('H', 1)], 0); assert_eq!( current, NormalizedString { original: "Hello friend".into(), normalized: "HHello friend".into(), alignments: vec![ (0, 1), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12) ], original_shift: 0, } ); assert_eq!( current.alignments_original(), vec![ (0, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12), (12, 13) ] ); // Adding in the middle let mut current = s.clone(); current.transform_range( Range::Original(5..6), vec![('_', 0), ('m', 1), ('y', 1), ('_', 1)], 0, ); assert_eq!( current, NormalizedString { original: "Hello friend".into(), normalized: "Hello_my_friend".into(), alignments: vec![ (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (5, 6), (5, 6), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12) ], original_shift: 0, } ); assert_eq!( current.alignments_original(), vec![ (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 9), (9, 10), (10, 11), (11, 12), (12, 13), (13, 14), (14, 15) ] ); // Adding at the end let mut current = s; current.transform_range(Range::Original(11..), vec![('d', 0), ('_', 1), ('!', 1)], 0); assert_eq!( current, NormalizedString { original: "Hello friend".into(), normalized: "Hello friend_!".into(), alignments: vec![ (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12), (11, 12), (11, 12) ], original_shift: 0, } ); assert_eq!( current.alignments_original(), vec![ (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 14) ] ); } #[test] fn transform_range_multiple_bytes() 
{ let s = NormalizedString::from("𝔾𝕠𝕠𝕕"); // Removing at the beginning let mut current = s.clone(); current.transform_range(Range::Original(0..8), vec![('G', -1)], 0); assert_eq!( current, NormalizedString { original: "𝔾𝕠𝕠𝕕".into(), normalized: "G𝕠𝕕".into(), alignments: vec![ (0, 4), (8, 12), (8, 12), (8, 12), (8, 12), (12, 16), (12, 16), (12, 16), (12, 16) ], original_shift: 0, } ); assert_eq!( current.alignments_original(), vec![ (0, 1), (0, 1), (0, 1), (0, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 5), (1, 5), (1, 5), (1, 5), (5, 9), (5, 9), (5, 9), (5, 9) ] ); assert_eq!(current.get_range(Range::Original(0..8)).unwrap(), "G"); assert_eq!(current.get_range(Range::Original(0..4)).unwrap(), "G"); assert_eq!( current.get_range_original(Range::Original(0..4)).unwrap(), "𝔾" ); assert_eq!( current.get_range_original(Range::Original(0..8)).unwrap(), "𝔾𝕠" ); // Removing in the middle let mut current = s.clone(); current.transform_range(Range::Original(4..12), vec![('o', -1)], 0); assert_eq!( current, NormalizedString { original: "𝔾𝕠𝕠𝕕".into(), normalized: "𝔾o𝕕".into(), alignments: vec![ (0, 4), (0, 4), (0, 4), (0, 4), (4, 8), (12, 16), (12, 16), (12, 16), (12, 16) ], original_shift: 0, } ); assert_eq!( current.alignments_original(), vec![ (0, 4), (0, 4), (0, 4), (0, 4), (4, 5), (4, 5), (4, 5), (4, 5), (5, 5), (5, 5), (5, 5), (5, 5), (5, 9), (5, 9), (5, 9), (5, 9) ] ); // Removing at the end let mut current = s.clone(); current.transform_range(Range::Original(12..), vec![('d', 0), ('!', 1)], 0); assert_eq!( current, NormalizedString { original: "𝔾𝕠𝕠𝕕".into(), normalized: "𝔾𝕠𝕠d!".into(), alignments: vec![ (0, 4), (0, 4), (0, 4), (0, 4), (4, 8), (4, 8), (4, 8), (4, 8), (8, 12), (8, 12), (8, 12), (8, 12), (12, 16), (12, 16) ], original_shift: 0, } ); // Adding at the beginning let mut current = s.clone(); current.transform_range(Range::Original(0..4), vec![('_', 1), ('𝔾', 0)], 0); assert_eq!( current, NormalizedString { original: "𝔾𝕠𝕠𝕕".into(), normalized: "_𝔾𝕠𝕠𝕕".into(), alignments: vec![ (0, 0), (0, 4), (0, 4), (0, 4), (0, 4), (4, 8), (4, 8), (4, 8), (4, 8), (8, 12), (8, 12), (8, 12), (8, 12), (12, 16), (12, 16), (12, 16), (12, 16) ], original_shift: 0, } ); assert_eq!( current.alignments_original(), vec![ (1, 5), (1, 5), (1, 5), (1, 5), (5, 9), (5, 9), (5, 9), (5, 9), (9, 13), (9, 13), (9, 13), (9, 13), (13, 17), (13, 17), (13, 17), (13, 17) ] ); assert_eq!(current.get_range(Range::Original(0..8)).unwrap(), "𝔾𝕠"); assert_eq!(current.get_range(Range::Original(0..4)).unwrap(), "𝔾"); assert_eq!( current.get_range_original(Range::Original(0..4)).unwrap(), "𝔾" ); assert_eq!( current.get_range_original(Range::Original(0..8)).unwrap(), "𝔾𝕠" ); // Equivalent to the previous one let mut current = s.clone(); current.transform_range(Range::Original(0..0), vec![('_', 1)], 0); assert_eq!( current, NormalizedString { original: "𝔾𝕠𝕠𝕕".into(), normalized: "_𝔾𝕠𝕠𝕕".into(), alignments: vec![ (0, 0), (0, 4), (0, 4), (0, 4), (0, 4), (4, 8), (4, 8), (4, 8), (4, 8), (8, 12), (8, 12), (8, 12), (8, 12), (12, 16), (12, 16), (12, 16), (12, 16) ], original_shift: 0, } ); assert_eq!( current.alignments_original(), vec![ (1, 5), (1, 5), (1, 5), (1, 5), (5, 9), (5, 9), (5, 9), (5, 9), (9, 13), (9, 13), (9, 13), (9, 13), (13, 17), (13, 17), (13, 17), (13, 17) ] ); assert_eq!(current.get_range(Range::Original(0..8)).unwrap(), "𝔾𝕠"); assert_eq!(current.get_range(Range::Original(0..4)).unwrap(), "𝔾"); assert_eq!( current.get_range_original(Range::Original(0..4)).unwrap(), "𝔾" ); assert_eq!( 
current.get_range_original(Range::Original(0..8)).unwrap(), "𝔾𝕠" ); // Adding as part of the first character let mut current = s.clone(); current.transform_range(Range::Original(0..4), vec![('𝔾', 0), ('o', 1)], 0); assert_eq!( current, NormalizedString { original: "𝔾𝕠𝕠𝕕".into(), normalized: "𝔾o𝕠𝕠𝕕".into(), alignments: vec![ (0, 4), (0, 4), (0, 4), (0, 4), (0, 4), (4, 8), (4, 8), (4, 8), (4, 8), (8, 12), (8, 12), (8, 12), (8, 12), (12, 16), (12, 16), (12, 16), (12, 16) ], original_shift: 0, } ); assert_eq!( current.alignments_original(), vec![ (0, 5), (0, 5), (0, 5), (0, 5), (5, 9), (5, 9), (5, 9), (5, 9), (9, 13), (9, 13), (9, 13), (9, 13), (13, 17), (13, 17), (13, 17), (13, 17) ] ); assert_eq!(current.get_range(Range::Original(0..8)).unwrap(), "𝔾o𝕠"); assert_eq!(current.get_range(Range::Original(0..4)).unwrap(), "𝔾o"); assert_eq!( current.get_range_original(Range::Original(0..4)).unwrap(), "𝔾" ); assert_eq!( current.get_range_original(Range::Original(0..8)).unwrap(), "𝔾𝕠" ); // Adding in the middle let mut current = s.clone(); current.transform_range( Range::Original(4..8), vec![('𝕠', 0), ('o', 1), ('o', 1), ('o', 1)], 0, ); assert_eq!( current, NormalizedString { original: "𝔾𝕠𝕠𝕕".into(), normalized: "𝔾𝕠ooo𝕠𝕕".into(), alignments: vec![ (0, 4), (0, 4), (0, 4), (0, 4), (4, 8), (4, 8), (4, 8), (4, 8), (4, 8), (4, 8), (4, 8), (8, 12), (8, 12), (8, 12), (8, 12), (12, 16), (12, 16), (12, 16), (12, 16) ], original_shift: 0, } ); assert_eq!( current.alignments_original(), vec![ (0, 4), (0, 4), (0, 4), (0, 4), (4, 11), (4, 11), (4, 11), (4, 11), (11, 15), (11, 15), (11, 15), (11, 15), (15, 19), (15, 19), (15, 19), (15, 19) ] ); // Adding at the end let mut current = s; current.transform_range(Range::Original(16..), vec![('!', 1)], 0); assert_eq!( current, NormalizedString { original: "𝔾𝕠𝕠𝕕".into(), normalized: "𝔾𝕠𝕠𝕕!".into(), alignments: vec![ (0, 4), (0, 4), (0, 4), (0, 4), (4, 8), (4, 8), (4, 8), (4, 8), (8, 12), (8, 12), (8, 12), (8, 12), (12, 16), (12, 16), (12, 16), (12, 16), (12, 16) ], original_shift: 0, } ); assert_eq!( current.alignments_original(), vec![ (0, 4), (0, 4), (0, 4), (0, 4), (4, 8), (4, 8), (4, 8), (4, 8), (8, 12), (8, 12), (8, 12), (8, 12), (12, 17), (12, 17), (12, 17), (12, 17) ] ); } #[test] fn transform_check() { let mut s = NormalizedString::from("abc…"); s.nfkd(); let transforms = vec![('a', -2), ('.', 0), ('.', 0), ('.', 0)]; s.transform(transforms, 0); s.lowercase(); assert_eq!(s.get(), "a..."); } }
tokenizers/tokenizers/src/tokenizer/normalizer.rs/0
{ "file_path": "tokenizers/tokenizers/src/tokenizer/normalizer.rs", "repo_id": "tokenizers", "token_count": 42406 }
246
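The normalizer.rs record above is mostly about keeping the normalized and original strings aligned while characters are added, removed, or replaced. Below is a minimal user-side sketch of that behaviour, reusing the same transformation chain as the range_conversion test; the only assumption is that NormalizedString is re-exported at the crate root, as shown in the documentation.rs record that follows.

use tokenizers::NormalizedString;

fn main() {
    // Same chain as the `range_conversion` test: drop whitespace, then lowercase.
    let mut n = NormalizedString::from(" __Hello__ ");
    n.filter(|c| !c.is_whitespace()).lowercase();

    // The normalized view changed while the original text is kept around;
    // the per-byte alignments are what allow offsets to be converted between the two.
    assert_eq!(n.get(), "__hello__");
    assert_eq!(n.get_original(), " __Hello__ ");
}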
use tokenizers::models::bpe::{BpeTrainerBuilder, BPE}; use tokenizers::normalizers::{Sequence, Strip, NFC}; use tokenizers::pre_tokenizers::byte_level::ByteLevel; use tokenizers::{AddedToken, TokenizerBuilder}; use tokenizers::{DecoderWrapper, NormalizerWrapper, PostProcessorWrapper, PreTokenizerWrapper}; use tokenizers::{Tokenizer, TokenizerImpl}; #[test] fn train_tokenizer() { let vocab_size: usize = 100; let mut tokenizer = TokenizerBuilder::new() .with_model(BPE::default()) .with_normalizer(Some(Sequence::new(vec![ Strip::new(true, true).into(), NFC.into(), ]))) .with_pre_tokenizer(Some(ByteLevel::default())) .with_post_processor(Some(ByteLevel::default())) .with_decoder(Some(ByteLevel::default())) .build() .unwrap(); let mut trainer = BpeTrainerBuilder::new() .show_progress(false) .vocab_size(vocab_size) .min_frequency(0) .special_tokens(vec![ AddedToken::from(String::from("<s>"), true), AddedToken::from(String::from("<pad>"), true), AddedToken::from(String::from("</s>"), true), AddedToken::from(String::from("<unk>"), true), AddedToken::from(String::from("<mask>"), true), ]) .build(); let pretty = true; tokenizer .train_from_files(&mut trainer, vec!["data/small.txt".to_string()]) .unwrap() .save("data/tokenizer.json", pretty) .unwrap(); } #[test] fn load_tokenizer() { let tokenizer = Tokenizer::from_file("data/roberta.json").unwrap(); let example = "This is an example"; let ids = vec![713, 16, 41, 1246]; let tokens = vec!["This", "Ġis", "Ġan", "Ġexample"]; let encodings = tokenizer.encode(example, false).unwrap(); assert_eq!(encodings.get_ids(), ids); assert_eq!(encodings.get_tokens(), tokens); let decoded = tokenizer.decode(&ids, false).unwrap(); assert_eq!(decoded, example); } #[test] #[ignore] fn quicktour_slow_train() -> tokenizers::Result<()> { // START quicktour_init_tokenizer use tokenizers::models::bpe::BPE; let mut tokenizer: TokenizerImpl< BPE, NormalizerWrapper, PreTokenizerWrapper, PostProcessorWrapper, DecoderWrapper, > = TokenizerImpl::new( BPE::builder() .unk_token("[UNK]".to_string()) .build() .unwrap(), ); // END quicktour_init_tokenizer // START quicktour_init_trainer use tokenizers::models::bpe::BpeTrainer; let mut trainer = BpeTrainer::builder() .special_tokens(vec![ AddedToken::from("[UNK]", true), AddedToken::from("[CLS]", true), AddedToken::from("[SEP]", true), AddedToken::from("[PAD]", true), AddedToken::from("[MASK]", true), ]) .build(); // END quicktour_init_trainer // START quicktour_init_pretok use tokenizers::pre_tokenizers::whitespace::Whitespace; tokenizer.with_pre_tokenizer(Whitespace {}); // END quicktour_init_pretok // START quicktour_train let files = vec![ "data/wikitext-103-raw/wiki.train.raw".into(), "data/wikitext-103-raw/wiki.test.raw".into(), "data/wikitext-103-raw/wiki.valid.raw".into(), ]; tokenizer.train_from_files(&mut trainer, files)?; // END quicktour_train // START quicktour_save tokenizer.save("data/tokenizer-wiki.json", false)?; // END quicktour_save Ok(()) } #[test] fn quicktour() -> tokenizers::Result<()> { // START quicktour_reload_tokenizer let mut tokenizer = Tokenizer::from_file("data/tokenizer-wiki.json")?; // END quicktour_reload_tokenizer // START quicktour_encode let output = tokenizer.encode("Hello, y'all! 
How are you 😁 ?", true)?; // END quicktour_encode // START quicktour_print_tokens println!("{:?}", output.get_tokens()); // ["Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?",] // END quicktour_print_tokens assert_eq!( output.get_tokens(), ["Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?",] ); // START quicktour_print_ids println!("{:?}", output.get_ids()); // [27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35] // END quicktour_print_ids assert_eq!( output.get_ids(), [27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35] ); // START quicktour_print_offsets println!("{:?}", output.get_offsets()[9]); // (26, 30) // END quicktour_print_offsets assert_eq!(output.get_offsets()[9], (26, 30)); // START quicktour_use_offsets let sentence = "Hello, y'all! How are you 😁 ?"; println!("{}", &sentence[26..30]); // "😁" // END quicktour_use_offsets // START quicktour_check_sep println!("{}", tokenizer.token_to_id("[SEP]").unwrap()); // 2 // END quicktour_check_sep assert_eq!(tokenizer.token_to_id("[SEP]"), Some(2)); // START quicktour_init_template_processing use tokenizers::processors::template::TemplateProcessing; let special_tokens = vec![ ("[CLS]", tokenizer.token_to_id("[CLS]").unwrap()), ("[SEP]", tokenizer.token_to_id("[SEP]").unwrap()), ]; tokenizer.with_post_processor( TemplateProcessing::builder() .try_single("[CLS] $A [SEP]") .unwrap() .try_pair("[CLS] $A [SEP] $B:1 [SEP]:1") .unwrap() .special_tokens(special_tokens) .build()?, ); // END quicktour_init_template_processing // START quicktour_print_special_tokens let output = tokenizer.encode("Hello, y'all! How are you 😁 ?", true)?; println!("{:?}", output.get_tokens()); // ["[CLS]", "Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?", "[SEP]"] // END quicktour_print_special_tokens assert_eq!( output.get_tokens(), ["[CLS]", "Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?", "[SEP]"] ); // START quicktour_print_special_tokens_pair let output = tokenizer.encode(("Hello, y'all!", "How are you 😁 ?"), true)?; println!("{:?}", output.get_tokens()); // ["[CLS]", "Hello", ",", "y", "'", "all", "!", "[SEP]", "How", "are", "you", "[UNK]", "?", "[SEP]"] // END quicktour_print_special_tokens_pair assert_eq!( output.get_tokens(), [ "[CLS]", "Hello", ",", "y", "'", "all", "!", "[SEP]", "How", "are", "you", "[UNK]", "?", "[SEP]" ] ); // START quicktour_print_type_ids println!("{:?}", output.get_type_ids()); // [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] // END quicktour_print_type_ids assert_eq!( output.get_type_ids(), [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] ); // START quicktour_encode_batch let output = tokenizer.encode_batch(vec!["Hello, y'all!", "How are you 😁 ?"], true)?; // END quicktour_encode_batch println!("{:?}", output); // START quicktour_encode_batch_pair let output = tokenizer.encode_batch( vec![ ("Hello, y'all!", "How are you 😁 ?"), ("Hello to you too!", "I'm fine, thank you!"), ], true, )?; // END quicktour_encode_batch_pair println!("{:?}", output); // START quicktour_enable_padding use tokenizers::PaddingParams; tokenizer.with_padding(Some(PaddingParams { pad_id: 3, pad_token: "[PAD]".to_string(), ..PaddingParams::default() })); // END quicktour_enable_padding // START quicktour_print_batch_tokens let output = tokenizer.encode_batch(vec!["Hello, y'all!", "How are you 😁 ?"], true)?; println!("{:?}", output[1].get_tokens()); // ["[CLS]", "How", "are", "you", "[UNK]", "?", "[SEP]", "[PAD]"] // END quicktour_print_batch_tokens assert_eq!( output[1].get_tokens(), 
["[CLS]", "How", "are", "you", "[UNK]", "?", "[SEP]", "[PAD]"] ); // START quicktour_print_attention_mask println!("{:?}", output[1].get_attention_mask()); // [1, 1, 1, 1, 1, 1, 1, 0] // END quicktour_print_attention_mask assert_eq!(output[1].get_attention_mask(), [1, 1, 1, 1, 1, 1, 1, 0]); Ok(()) } #[test] fn pipeline() -> tokenizers::Result<()> { // START pipeline_reload_tokenizer use tokenizers::Tokenizer; let mut tokenizer = Tokenizer::from_file("data/tokenizer-wiki.json")?; // END pipeline_reload_tokenizer // START pipeline_setup_normalizer use tokenizers::normalizers::{ strip::StripAccents, unicode::NFD, utils::Sequence as NormalizerSequence, }; let normalizer = NormalizerSequence::new(vec![NFD.into(), StripAccents.into()]); // END pipeline_setup_normalizer // START pipeline_test_normalizer use tokenizers::{NormalizedString, Normalizer}; let mut normalized = NormalizedString::from("Héllò hôw are ü?"); normalizer.normalize(&mut normalized)?; println!("{}", normalized.get()); // "Hello how are u?" // END pipeline_test_normalizer assert_eq!(normalized.get(), "Hello how are u?"); // START pipeline_replace_normalizer tokenizer.with_normalizer(normalizer); // END pipeline_replace_normalizer // START pipeline_setup_pre_tokenizer use tokenizers::pre_tokenizers::whitespace::Whitespace; use tokenizers::{OffsetReferential, OffsetType, PreTokenizedString, PreTokenizer}; let pre_tokenizer = Whitespace {}; let mut pre_tokenized = PreTokenizedString::from("Hello! How are you? I'm fine, thank you."); pre_tokenizer.pre_tokenize(&mut pre_tokenized)?; println!( "{:?}", pre_tokenized.get_splits(OffsetReferential::Original, OffsetType::Byte) ); // [("Hello", (0, 5), None), ("!", (5, 6), None), ("How", (7, 10), None), // ("are", (11, 14), None), ("you", (15, 18), None), ("?", (18, 19), None), // ("I", (20, 21), None), ("\'", (21, 22), None), ("m", (22, 23), None), // ("fine", (24, 28), None), (",", (28, 29), None), ("thank", (30, 35), None), // ("you", (36, 39), None), (".", (39, 40), None)] // END pipeline_setup_pre_tokenizer assert_eq!( pre_tokenized.get_splits(OffsetReferential::Original, OffsetType::Byte), vec![ ("Hello", (0, 5), &None), ("!", (5, 6), &None), ("How", (7, 10), &None), ("are", (11, 14), &None), ("you", (15, 18), &None), ("?", (18, 19), &None), ("I", (20, 21), &None), ("\'", (21, 22), &None), ("m", (22, 23), &None), ("fine", (24, 28), &None), (",", (28, 29), &None), ("thank", (30, 35), &None), ("you", (36, 39), &None), (".", (39, 40), &None) ] ); // START pipeline_combine_pre_tokenizer use tokenizers::pre_tokenizers::{digits::Digits, sequence::Sequence}; let pre_tokenizer = Sequence::new(vec![Whitespace {}.into(), Digits::new(true).into()]); let mut pre_tokenized = PreTokenizedString::from("Call 911!"); pre_tokenizer.pre_tokenize(&mut pre_tokenized)?; println!( "{:?}", pre_tokenized.get_splits(OffsetReferential::Original, OffsetType::Byte) ); // END pipeline_combine_pre_tokenizer assert_eq!( pre_tokenized.get_splits(OffsetReferential::Original, OffsetType::Byte), vec![ ("Call", (0, 4), &None), ("9", (5, 6), &None), ("1", (6, 7), &None), ("1", (7, 8), &None), ("!", (8, 9), &None) ] ); // START pipeline_replace_pre_tokenizer tokenizer.with_pre_tokenizer(pre_tokenizer); // END pipeline_replace_pre_tokenizer // START pipeline_setup_processor use tokenizers::processors::template::TemplateProcessing; tokenizer.with_post_processor( TemplateProcessing::builder() .try_single("[CLS] $A [SEP]") .unwrap() .try_pair("[CLS] $A [SEP] $B:1 [SEP]:1") .unwrap() .special_tokens(vec![("[CLS]", 1), ("[SEP]", 
2)]) .build() .unwrap(), ); // END pipeline_setup_processor // START pipeline_test_decoding let output = tokenizer.encode("Hello, y'all! How are you 😁 ?", true)?; println!("{:?}", output.get_ids()); // [1, 27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35, 2] let decoded = tokenizer.decode( &[1, 27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35, 2], true, )?; println!("{}", decoded); // "Hello , y ' all ! How are you ?" // END pipeline_test_decoding Ok(()) } #[test] #[ignore] fn train_pipeline_bert() -> tokenizers::Result<()> { // START bert_setup_tokenizer use tokenizers::models::wordpiece::WordPiece; use tokenizers::Tokenizer; let mut bert_tokenizer = Tokenizer::new( WordPiece::builder() .unk_token("[UNK]".to_string()) .build() .unwrap(), ); // END bert_setup_tokenizer // START bert_setup_normalizer use tokenizers::normalizers::utils::Sequence as NormalizerSequence; use tokenizers::normalizers::{strip::StripAccents, unicode::NFD, utils::Lowercase}; bert_tokenizer.with_normalizer(NormalizerSequence::new(vec![ NFD.into(), Lowercase.into(), StripAccents.into(), ])); // END bert_setup_normalizer // START bert_setup_pre_tokenizer use tokenizers::pre_tokenizers::whitespace::Whitespace; bert_tokenizer.with_pre_tokenizer(Whitespace {}); // END bert_setup_pre_tokenizer // START bert_setup_processor use tokenizers::processors::template::TemplateProcessing; bert_tokenizer.with_post_processor( TemplateProcessing::builder() .try_single("[CLS] $A [SEP]") .unwrap() .try_pair("[CLS] $A [SEP] $B:1 [SEP]:1") .unwrap() .special_tokens(vec![("[CLS]", 1), ("[SEP]", 2)]) .build() .unwrap(), ); // END bert_setup_processor // START bert_train_tokenizer use tokenizers::models::{wordpiece::WordPieceTrainer, TrainerWrapper}; let mut trainer: TrainerWrapper = WordPieceTrainer::builder() .vocab_size(30_522) .special_tokens(vec![ AddedToken::from("[UNK]", true), AddedToken::from("[CLS]", true), AddedToken::from("[SEP]", true), AddedToken::from("[PAD]", true), AddedToken::from("[MASK]", true), ]) .build() .into(); let files = vec![ "data/wikitext-103-raw/wiki.train.raw".into(), "data/wikitext-103-raw/wiki.test.raw".into(), "data/wikitext-103-raw/wiki.valid.raw".into(), ]; bert_tokenizer.train_from_files(&mut trainer, files)?; bert_tokenizer.save("data/bert-wiki.json", false)?; // END bert_train_tokenizer Ok(()) } #[test] fn pipeline_bert() -> tokenizers::Result<()> { let mut bert_tokenizer = Tokenizer::from_file("data/bert-wiki.json")?; // START bert_test_decoding let output = bert_tokenizer.encode("Welcome to the 🤗 Tokenizers library.", true)?; println!("{:?}", output.get_tokens()); // ["[CLS]", "welcome", "to", "the", "[UNK]", "tok", "##eni", "##zer", "##s", "library", ".", "[SEP]"] let decoded = bert_tokenizer.decode(output.get_ids(), true)?; println!("{}", decoded); // "welcome to the tok ##eni ##zer ##s library ." // END bert_test_decoding assert_eq!( output.get_tokens(), &[ "[CLS]", "welcome", "to", "the", "[UNK]", "tok", "##eni", "##zer", "##s", "library", ".", "[SEP]" ] ); assert_eq!(decoded, "welcome to the tok ##eni ##zer ##s library ."); // START bert_proper_decoding use tokenizers::decoders::wordpiece::WordPiece as WordPieceDecoder; bert_tokenizer.with_decoder(WordPieceDecoder::default()); let decoded = bert_tokenizer.decode(output.get_ids(), true)?; // "welcome to the tokenizers library." // END bert_proper_decoding assert_eq!(decoded, "welcome to the tokenizers library."); Ok(()) }
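The Rust tests above exercise the full normalizer → pre-tokenizer → post-processor pipeline. For readers following along with the Python bindings of 🤗 Tokenizers, a rough equivalent sketch is shown below; it assumes the `data/tokenizer-wiki.json` file produced in the quicktour and reuses the `[CLS]`/`[SEP]` ids from the template above, so it is illustrative rather than part of the test suite.

```python
# Rough Python-bindings equivalent of the Rust pipeline test above (illustrative only).
# Assumes the `data/tokenizer-wiki.json` file trained in the quicktour.
from tokenizers import Tokenizer
from tokenizers.normalizers import NFD, Sequence as NormalizerSequence, StripAccents
from tokenizers.pre_tokenizers import Digits, Sequence as PreTokenizerSequence, Whitespace
from tokenizers.processors import TemplateProcessing

tokenizer = Tokenizer.from_file("data/tokenizer-wiki.json")

# Normalize with NFD, then strip accents ("Héllò" -> "Hello").
tokenizer.normalizer = NormalizerSequence([NFD(), StripAccents()])

# Split on whitespace/punctuation, then isolate individual digits ("911" -> "9", "1", "1").
tokenizer.pre_tokenizer = PreTokenizerSequence([Whitespace(), Digits(individual_digits=True)])

# Wrap single sentences and pairs with [CLS]/[SEP], mirroring the template used above.
tokenizer.post_processor = TemplateProcessing(
    single="[CLS] $A [SEP]",
    pair="[CLS] $A [SEP] $B:1 [SEP]:1",
    special_tokens=[("[CLS]", 1), ("[SEP]", 2)],
)

output = tokenizer.encode("Hello, y'all! How are you 😁 ?")
print(output.tokens)
```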
tokenizers/tokenizers/tests/documentation.rs/0
{ "file_path": "tokenizers/tokenizers/tests/documentation.rs", "repo_id": "tokenizers", "token_count": 7402 }
247
.PHONY: deps_table_update modified_only_fixup extra_style_checks quality style fixup fix-copies test test-examples # make sure to test the local checkout in scripts and not the pre-installed one (don't use quotes!) export PYTHONPATH = src check_dirs := examples tests src utils exclude_folders := examples/research_projects modified_only_fixup: $(eval modified_py_files := $(shell python utils/get_modified_files.py $(check_dirs))) @if test -n "$(modified_py_files)"; then \ echo "Checking/fixing $(modified_py_files)"; \ ruff check $(modified_py_files) --fix --exclude $(exclude_folders); \ ruff format $(modified_py_files) --exclude $(exclude_folders);\ else \ echo "No library .py files were modified"; \ fi # Update src/transformers/dependency_versions_table.py deps_table_update: @python setup.py deps_table_update deps_table_check_updated: @md5sum src/transformers/dependency_versions_table.py > md5sum.saved @python setup.py deps_table_update @md5sum -c --quiet md5sum.saved || (printf "\nError: the version dependency table is outdated.\nPlease run 'make fixup' or 'make style' and commit the changes.\n\n" && exit 1) @rm md5sum.saved # autogenerating code autogenerate_code: deps_table_update # Check that the repo is in a good state repo-consistency: python utils/check_copies.py python utils/check_table.py python utils/check_dummies.py python utils/check_repo.py python utils/check_inits.py python utils/check_config_docstrings.py python utils/check_config_attributes.py python utils/check_doctest_list.py python utils/update_metadata.py --check-only python utils/check_task_guides.py python utils/check_docstrings.py python utils/check_support_list.py # this target runs checks on all files quality: ruff check $(check_dirs) setup.py conftest.py ruff format --check $(check_dirs) setup.py conftest.py python utils/custom_init_isort.py --check_only python utils/sort_auto_mappings.py --check_only python utils/check_doc_toc.py # Format source code automatically and check is there are any problems left that need manual fixing extra_style_checks: python utils/custom_init_isort.py python utils/sort_auto_mappings.py python utils/check_doc_toc.py --fix_and_overwrite # this target runs checks on all files and potentially modifies some of them style: ruff check $(check_dirs) setup.py conftest.py --fix --exclude $(exclude_folders) ruff format $(check_dirs) setup.py conftest.py --exclude $(exclude_folders) ${MAKE} autogenerate_code ${MAKE} extra_style_checks # Super fast fix and check target that only works on relevant modified files since the branch was made fixup: modified_only_fixup extra_style_checks autogenerate_code repo-consistency # Make marked copies of snippets of codes conform to the original fix-copies: python utils/check_copies.py --fix_and_overwrite python utils/check_table.py --fix_and_overwrite python utils/check_dummies.py --fix_and_overwrite python utils/check_doctest_list.py --fix_and_overwrite python utils/check_task_guides.py --fix_and_overwrite python utils/check_docstrings.py --fix_and_overwrite # Run tests for the library test: python -m pytest -n auto --dist=loadfile -s -v ./tests/ # Run tests for examples test-examples: python -m pytest -n auto --dist=loadfile -s -v ./examples/pytorch/ # Run tests for SageMaker DLC release test-sagemaker: # install sagemaker dependencies in advance with pip install .[sagemaker] TEST_SAGEMAKER=True python -m pytest -n auto -s -v ./tests/sagemaker # Release stuff pre-release: python utils/release.py pre-patch: python utils/release.py --patch post-release: python 
utils/release.py --post_release post-patch: python utils/release.py --post_release --patch build-release: rm -rf dist rm -rf build python setup.py bdist_wheel python setup.py sdist python utils/check_build.py
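The `modified_only_fixup` target above delegates the "which files changed?" question to a helper script. As a hypothetical sketch of that idea only (not the actual `utils/get_modified_files.py`), such a helper could compare the current branch against its fork point with `main` and print the matching `.py` files:

```python
# Hypothetical sketch of a "modified Python files" helper like the one the
# `modified_only_fixup` target shells out to -- not the actual utils/get_modified_files.py.
import subprocess
import sys


def modified_python_files(check_dirs):
    """List .py files under `check_dirs` that changed since the branch forked from main."""
    fork_point = subprocess.check_output(["git", "merge-base", "main", "HEAD"], text=True).strip()
    changed = subprocess.check_output(["git", "diff", "--name-only", fork_point], text=True).splitlines()
    return [path for path in changed if path.endswith(".py") and path.startswith(tuple(check_dirs))]


if __name__ == "__main__":
    # e.g. `python modified_files_sketch.py examples tests src utils`
    print(" ".join(modified_python_files(sys.argv[1:])))
```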
transformers/Makefile/0
{ "file_path": "transformers/Makefile", "repo_id": "transformers", "token_count": 1325 }
248
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import doctest import sys import warnings from os.path import abspath, dirname, join import _pytest import pytest from transformers.testing_utils import HfDoctestModule, HfDocTestParser NOT_DEVICE_TESTS = { "test_tokenization", "test_processor", "test_processing", "test_beam_constraints", "test_configuration_utils", "test_data_collator", "test_trainer_callback", "test_trainer_utils", "test_feature_extraction", "test_image_processing", "test_image_processor", "test_image_transforms", "test_optimization", "test_retrieval", "test_config", "test_from_pretrained_no_checkpoint", "test_keep_in_fp32_modules", "test_gradient_checkpointing_backward_compatibility", "test_gradient_checkpointing_enable_disable", "test_save_load_fast_init_from_base", "test_fast_init_context_manager", "test_fast_init_tied_embeddings", "test_save_load_fast_init_to_base", "test_torch_save_load", "test_initialization", "test_forward_signature", "test_model_common_attributes", "test_model_main_input_name", "test_correct_missing_keys", "test_tie_model_weights", "test_can_use_safetensors", "test_load_save_without_tied_weights", "test_tied_weights_keys", "test_model_weights_reload_no_missing_tied_weights", "test_pt_tf_model_equivalence", "test_mismatched_shapes_have_properly_initialized_weights", "test_matched_shapes_have_loaded_weights_when_some_mismatched_shapes_exist", "test_model_is_small", "test_tf_from_pt_safetensors", "test_flax_from_pt_safetensors", "ModelTest::test_pipeline_", # None of the pipeline tests from PipelineTesterMixin (of which XxxModelTest inherits from) are running on device "ModelTester::test_pipeline_", "/repo_utils/", "/utils/", "/tools/", } # allow having multiple repository checkouts and not needing to remember to rerun # `pip install -e '.[dev]'` when switching between checkouts and running tests. git_repo_path = abspath(join(dirname(__file__), "src")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. 
the tests still need to test the current functionality warnings.simplefilter(action="ignore", category=FutureWarning) def pytest_configure(config): config.addinivalue_line( "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" ) config.addinivalue_line( "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested" ) config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested") config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment") config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate") config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule") config.addinivalue_line("markers", "not_device_test: mark the tests always running on cpu") def pytest_collection_modifyitems(items): for item in items: if any(test_name in item.nodeid for test_name in NOT_DEVICE_TESTS): item.add_marker(pytest.mark.not_device_test) def pytest_addoption(parser): from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(parser) def pytest_terminal_summary(terminalreporter): from transformers.testing_utils import pytest_terminal_summary_main make_reports = terminalreporter.config.getoption("--make-reports") if make_reports: pytest_terminal_summary_main(terminalreporter, id=make_reports) def pytest_sessionfinish(session, exitstatus): # If no tests are collected, pytest exists with code 5, which makes the CI fail. if exitstatus == 5: session.exitstatus = 0 # Doctest custom flag to ignore output. IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT") OutputChecker = doctest.OutputChecker class CustomOutputChecker(OutputChecker): def check_output(self, want, got, optionflags): if IGNORE_RESULT & optionflags: return True return OutputChecker.check_output(self, want, got, optionflags) doctest.OutputChecker = CustomOutputChecker _pytest.doctest.DoctestModule = HfDoctestModule doctest.DocTestParser = HfDocTestParser
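The custom `IGNORE_RESULT` doctest flag registered above lets a doctest line execute without comparing its output. A hypothetical docstring using it could look like the following (the function and its docstring are illustrative only):

```python
# Hypothetical docstring showing the custom IGNORE_RESULT doctest flag in use:
# the seeding line runs, but its return value (a torch.Generator repr) is not compared.
def seeded_example():
    """
    Example:

    >>> import torch
    >>> torch.manual_seed(42)  # doctest: +IGNORE_RESULT
    >>> torch.initial_seed()
    42
    """
```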
transformers/conftest.py/0
{ "file_path": "transformers/conftest.py", "repo_id": "transformers", "token_count": 1803 }
249
<!--- Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Generating the documentation To generate the documentation, you first have to build it. Several packages are necessary to build the doc, you can install them with the following command, at the root of the code repository: ```bash pip install -e ".[docs]" ``` Then you need to install our special tool that builds the documentation: ```bash pip install git+https://github.com/huggingface/doc-builder ``` --- **NOTE** You only need to generate the documentation to inspect it locally (if you're planning changes and want to check how they look before committing for instance). You don't have to commit the built documentation. --- ## Building the documentation Once you have setup the `doc-builder` and additional packages, you can generate the documentation by typing the following command: ```bash doc-builder build transformers docs/source/en/ --build_dir ~/tmp/test-build ``` You can adapt the `--build_dir` to set any temporary folder that you prefer. This command will create it and generate the MDX files that will be rendered as the documentation on the main website. You can inspect them in your favorite Markdown editor. ## Previewing the documentation To preview the docs, first install the `watchdog` module with: ```bash pip install watchdog ``` Then run the following command: ```bash doc-builder preview {package_name} {path_to_docs} ``` For example: ```bash doc-builder preview transformers docs/source/en/ ``` The docs will be viewable at [http://localhost:3000](http://localhost:3000). You can also preview the docs once you have opened a PR. You will see a bot add a comment to a link where the documentation with your changes lives. --- **NOTE** The `preview` command only works with existing doc files. When you add a completely new file, you need to update `_toctree.yml` & restart `preview` command (`ctrl-c` to stop it & call `doc-builder preview ...` again). --- ## Adding a new element to the navigation bar Accepted files are Markdown (.md). Create a file with its extension and put it in the source directory. You can then link it to the toc-tree by putting the filename without the extension in the [`_toctree.yml`](https://github.com/huggingface/transformers/blob/main/docs/source/en/_toctree.yml) file. ## Renaming section headers and moving sections It helps to keep the old links working when renaming the section header and/or moving sections from one document to another. This is because the old links are likely to be used in Issues, Forums, and Social media and it'd make for a much more superior user experience if users reading those months later could still easily navigate to the originally intended information. Therefore, we simply keep a little map of moved sections at the end of the document where the original section was. The key is to preserve the original anchor. 
So if you renamed a section from: "Section A" to "Section B", then you can add at the end of the file: ``` Sections that were moved: [ <a href="#section-b">Section A</a><a id="section-a"></a> ] ``` and of course, if you moved it to another file, then: ``` Sections that were moved: [ <a href="../new-file#section-b">Section A</a><a id="section-a"></a> ] ``` Use the relative style to link to the new file so that the versioned docs continue to work. For an example of a rich moved section set please see the very end of [the Trainer doc](https://github.com/huggingface/transformers/blob/main/docs/source/en/main_classes/trainer.md). ## Writing Documentation - Specification The `huggingface/transformers` documentation follows the [Google documentation](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html) style for docstrings, although we can write them directly in Markdown. ### Adding a new tutorial Adding a new tutorial or section is done in two steps: - Add a new file under `./source`. This file can either be ReStructuredText (.rst) or Markdown (.md). - Link that file in `./source/_toctree.yml` on the correct toc-tree. Make sure to put your new file under the proper section. It's unlikely to go in the first section (*Get Started*), so depending on the intended targets (beginners, more advanced users, or researchers) it should go in sections two, three, or four. ### Translating When translating, refer to the guide at [./TRANSLATING.md](https://github.com/huggingface/transformers/blob/main/docs/TRANSLATING.md). ### Adding a new model When adding a new model: - Create a file `xxx.md` or under `./source/model_doc` (don't hesitate to copy an existing file as template). - Link that file in `./source/_toctree.yml`. - Write a short overview of the model: - Overview with paper & authors - Paper abstract - Tips and tricks and how to use it best - Add the classes that should be linked in the model. This generally includes the configuration, the tokenizer, and every model of that class (the base model, alongside models with additional heads), both in PyTorch and TensorFlow. The order is generally: - Configuration - Tokenizer - PyTorch base model - PyTorch head models - TensorFlow base model - TensorFlow head models - Flax base model - Flax head models These classes should be added using our Markdown syntax. Usually as follows: ``` ## XXXConfig [[autodoc]] XXXConfig ``` This will include every public method of the configuration that is documented. If for some reason you wish for a method not to be displayed in the documentation, you can do so by specifying which methods should be in the docs: ``` ## XXXTokenizer [[autodoc]] XXXTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ``` If you just want to add a method that is not documented (for instance magic methods like `__call__` are not documented by default) you can put the list of methods to add in a list that contains `all`: ``` ## XXXTokenizer [[autodoc]] XXXTokenizer - all - __call__ ``` ### Writing source documentation Values that should be put in `code` should either be surrounded by backticks: \`like so\`. Note that argument names and objects like True, None, or any strings should usually be put in `code`. When mentioning a class, function, or method, it is recommended to use our syntax for internal links so that our tool adds a link to its documentation with this syntax: \[\`XXXClass\`\] or \[\`function\`\]. 
This requires the class or function to be in the main package. If you want to create a link to some internal class or function, you need to provide its path. For instance: \[\`utils.ModelOutput\`\]. This will be converted into a link with `utils.ModelOutput` in the description. To get rid of the path and only keep the name of the object you are linking to in the description, add a ~: \[\`~utils.ModelOutput\`\] will generate a link with `ModelOutput` in the description. The same works for methods so you can either use \[\`XXXClass.method\`\] or \[\`~XXXClass.method\`\]. #### Defining arguments in a method Arguments should be defined with the `Args:` (or `Arguments:` or `Parameters:`) prefix, followed by a line return and an indentation. The argument should be followed by its type, with its shape if it is a tensor, a colon, and its description: ``` Args: n_layers (`int`): The number of layers of the model. ``` If the description is too long to fit in one line, another indentation is necessary before writing the description after the argument. Here's an example showcasing everything so far: ``` Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AlbertTokenizer`]. See [`~PreTrainedTokenizer.encode`] and [`~PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) ``` For optional arguments or arguments with defaults we follow the following syntax: imagine we have a function with the following signature: ``` def my_function(x: str = None, a: float = 1): ``` then its documentation should look like this: ``` Args: x (`str`, *optional*): This argument controls ... a (`float`, *optional*, defaults to 1): This argument is used to ... ``` Note that we always omit the "defaults to \`None\`" when None is the default for any argument. Also note that even if the first line describing your argument type and its default gets long, you can't break it on several lines. You can however, write as many lines as you want in the indented description (see the example above with `input_ids`). #### Writing a multi-line code block Multi-line code blocks can be useful for displaying examples. They are done between two lines of three backticks as usual in Markdown: ```` ``` # first line of code # second line # etc ``` ```` We follow the [doctest](https://docs.python.org/3/library/doctest.html) syntax for the examples to automatically test the results to stay consistent with the library. #### Writing a return block The return block should be introduced with the `Returns:` prefix, followed by a line return and an indentation. The first line should be the type of the return, followed by a line return. No need to indent further for the elements building the return. Here's an example of a single value return: ``` Returns: `List[int]`: A list of integers in the range [0, 1] --- 1 for a special token, 0 for a sequence token. ``` Here's an example of a tuple return, comprising several objects: ``` Returns: `tuple(torch.FloatTensor)` comprising various elements depending on the configuration ([`BertConfig`]) and inputs: - ** loss** (*optional*, returned when `masked_lm_labels` is provided) `torch.FloatTensor` of shape `(1,)` -- Total loss is the sum of the masked language modeling loss and the next sequence prediction (classification) loss. 
- **prediction_scores** (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`) -- Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). ``` #### Adding an image Due to the rapidly growing repository, it is important to make sure that no files that would significantly weigh down the repository are added. This includes images, videos, and other non-text files. We prefer to leverage a hf.co hosted `dataset` like the ones hosted on [`hf-internal-testing`](https://huggingface.co/hf-internal-testing) in which to place these files and reference them by URL. We recommend putting them in the following dataset: [huggingface/documentation-images](https://huggingface.co/datasets/huggingface/documentation-images). If an external contribution, feel free to add the images to your PR and ask a Hugging Face member to migrate your images to this dataset. ## Styling the docstring We have an automatic script running with the `make style` comment that will make sure that: - the docstrings fully take advantage of the line width - all code examples are formatted using black, like the code of the Transformers library This script may have some weird failures if you made a syntax mistake or if you uncover a bug. Therefore, it's recommended to commit your changes before running `make style`, so you can revert the changes done by that script easily. # Testing documentation examples Good documentation often comes with an example of how a specific function or class should be used. Each model class should contain at least one example showcasing how to use this model class in inference. *E.g.* the class [Wav2Vec2ForCTC](https://huggingface.co/docs/transformers/model_doc/wav2vec2#transformers.Wav2Vec2ForCTC) includes an example of how to transcribe speech to text in the [docstring of its forward function](https://huggingface.co/docs/transformers/model_doc/wav2vec2#transformers.Wav2Vec2ForCTC.forward). ## Writing documentation examples The syntax for Example docstrings can look as follows: ``` Example: ```python >>> from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC >>> from datasets import load_dataset >>> import torch >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") >>> dataset = dataset.sort("id") >>> sampling_rate = dataset.features["audio"].sampling_rate >>> processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h") >>> model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h") >>> # audio file is decoded on the fly >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> predicted_ids = torch.argmax(logits, dim=-1) >>> # transcribe speech >>> transcription = processor.batch_decode(predicted_ids) >>> transcription[0] 'MISTER QUILTER IS THE APOSTLE OF THE MIDDLE CLASSES AND WE ARE GLAD TO WELCOME HIS GOSPEL' ``` ``` The docstring should give a minimal, clear example of how the respective model is to be used in inference and also include the expected (ideally sensible) output. Often, readers will try out the example before even going through the function or class definitions. Therefore, it is of utmost importance that the example works as expected. ## Docstring testing To do so each example should be included in the doctests. We use pytests' [doctest integration](https://docs.pytest.org/doctest.html) to verify that all of our examples run correctly. 
For Transformers, the doctests are run on a daily basis via GitHub Actions as can be seen [here](https://github.com/huggingface/transformers/actions/workflows/doctests.yml). ### For Python files Run all the tests in the docstrings of a given file with the following command, here is how we test the modeling file of Wav2Vec2 for instance: ```bash pytest --doctest-modules src/transformers/models/wav2vec2/modeling_wav2vec2.py -sv --doctest-continue-on-failure ``` If you want to isolate a specific docstring, just add `::` after the file name then type the whole path of the function/class/method whose docstring you want to test. For instance, here is how to just test the forward method of `Wav2Vec2ForCTC`: ```bash pytest --doctest-modules src/transformers/models/wav2vec2/modeling_wav2vec2.py::transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForCTC.forward -sv --doctest-continue-on-failure ``` ### For Markdown files You can test locally a given file with this command (here testing the quicktour): ```bash pytest --doctest-modules docs/source/quicktour.md -sv --doctest-continue-on-failure --doctest-glob="*.md" ``` ### Writing doctests Here are a few tips to help you debug the doctests and make them pass: - The outputs of the code need to match the expected output **exactly**, so make sure you have the same outputs. In particular doctest will see a difference between single quotes and double quotes, or a missing parenthesis. The only exceptions to that rule are: * whitespace: one give whitespace (space, tabulation, new line) is equivalent to any number of whitespace, so you can add new lines where there are spaces to make your output more readable. * numerical values: you should never put more than 4 or 5 digits to expected results as different setups or library versions might get you slightly different results. `doctest` is configured to ignore any difference lower than the precision to which you wrote (so 1e-4 if you write 4 digits). - Don't leave a block of code that is very long to execute. If you can't make it fast, you can either not use the doctest syntax on it (so that it's ignored), or if you want to use the doctest syntax to show the results, you can add a comment `# doctest: +SKIP` at the end of the lines of code too long to execute - Each line of code that produces a result needs to have that result written below. You can ignore an output if you don't want to show it in your code example by adding a comment ` # doctest: +IGNORE_RESULT` at the end of the line of code producing it.
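Putting those conventions together, a short hypothetical docstring might look like the sketch below; the pipeline output shown is illustrative, and the slow lines are skipped rather than executed.

```python
# Hypothetical docstring illustrating the doctest conventions above.
def sentiment_example():
    """
    Example:

    >>> from transformers import pipeline
    >>> classifier = pipeline("sentiment-analysis")  # doctest: +SKIP
    >>> classifier("I love this library!")  # doctest: +SKIP
    [{'label': 'POSITIVE', 'score': 0.9998}]
    >>> round(2 / 3, 4)  # keep expected numerical outputs to 4-5 digits
    0.6667
    """
```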
transformers/docs/README.md/0
{ "file_path": "transformers/docs/README.md", "repo_id": "transformers", "token_count": 4835 }
250
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Pipelines für Inferenzen Die [`pipeline`] macht es einfach, jedes beliebige Modell aus dem [Hub](https://huggingface.co/models) für die Inferenz auf jede Sprache, Computer Vision, Sprache und multimodale Aufgaben zu verwenden. Selbst wenn Sie keine Erfahrung mit einer bestimmten Modalität haben oder nicht mit dem zugrundeliegenden Code hinter den Modellen vertraut sind, können Sie sie mit der [`pipeline`] für Inferenzen verwenden! In diesem Beispiel lernen Sie, wie: * Eine [`pipeline`] für Inferenz zu verwenden. * Einen bestimmten Tokenizer oder ein bestimmtes Modell zu verwenden. * Eine [`pipeline`] für Audio-, Vision- und multimodale Aufgaben zu verwenden. <Tip> Eine vollständige Liste der unterstützten Aufgaben und verfügbaren Parameter finden Sie in der [`pipeline`]-Dokumentation. </Tip> ## Verwendung von Pipelines Obwohl jede Aufgabe eine zugehörige [`pipeline`] hat, ist es einfacher, die allgemeine [`pipeline`]-Abstraktion zu verwenden, die alle aufgabenspezifischen Pipelines enthält. Die [`pipeline`] lädt automatisch ein Standardmodell und eine Vorverarbeitungsklasse, die für Ihre Aufgabe inferenzfähig ist. 1. Beginnen Sie mit der Erstellung einer [`pipeline`] und geben Sie eine Inferenzaufgabe an: ```py >>> from transformers import pipeline >>> generator = pipeline(task="text-generation") ``` 2. Übergeben Sie Ihren Eingabetext an die [`pipeline`]: ```py >>> generator( ... "Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone" ... ) # doctest: +SKIP [{'generated_text': 'Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone, Seven for the Iron-priests at the door to the east, and thirteen for the Lord Kings at the end of the mountain'}] ``` Wenn Sie mehr als eine Eingabe haben, übergeben Sie die Eingabe als Liste: ```py >>> generator( ... [ ... "Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone", ... "Nine for Mortal Men, doomed to die, One for the Dark Lord on his dark throne", ... ] ... ) # doctest: +SKIP ``` Alle zusätzlichen Parameter für Ihre Aufgabe können auch in die [`pipeline`] aufgenommen werden. Die Aufgabe `Text-Generierung` hat eine [`~generation.GenerationMixin.generate`]-Methode mit mehreren Parametern zur Steuerung der Ausgabe. Wenn Sie zum Beispiel mehr als eine Ausgabe erzeugen wollen, setzen Sie den Parameter `num_return_sequences`: ```py >>> generator( ... "Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone", ... num_return_sequences=2, ... ) # doctest: +SKIP ``` ### Wählen Sie ein Modell und einen Tokenizer Die [`pipeline`] akzeptiert jedes Modell aus dem [Hub](https://huggingface.co/models). 
Auf dem Hub gibt es Tags, mit denen Sie nach einem Modell filtern können, das Sie für Ihre Aufgabe verwenden möchten. Sobald Sie ein passendes Modell ausgewählt haben, laden Sie es mit der entsprechenden `AutoModelFor` und [`AutoTokenizer`] Klasse. Laden Sie zum Beispiel die Klasse [`AutoModelForCausalLM`] für eine kausale Sprachmodellierungsaufgabe: ```py >>> from transformers import AutoTokenizer, AutoModelForCausalLM >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2") >>> model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2") ``` Erstellen Sie eine [`pipeline`] für Ihre Aufgabe, und geben Sie das Modell und den Tokenizer an, die Sie geladen haben: ```py >>> from transformers import pipeline >>> generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer) ``` Übergeben Sie Ihren Eingabetext an die [`pipeline`] , um einen Text zu erzeugen: ```py >>> generator( ... "Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone" ... ) # doctest: +SKIP [{'generated_text': 'Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone, Seven for the Dragon-lords (for them to rule in a world ruled by their rulers, and all who live within the realm'}] ``` ## Audio-Pipeline Die [`pipeline`] unterstützt auch Audioaufgaben wie Audioklassifizierung und automatische Spracherkennung. Lassen Sie uns zum Beispiel die Emotion in diesem Audioclip klassifizieren: ```py >>> from datasets import load_dataset >>> import torch >>> torch.manual_seed(42) # doctest: +IGNORE_RESULT >>> ds = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") >>> audio_file = ds[0]["audio"]["path"] ``` Finden Sie ein [Audioklassifikation](https://huggingface.co/models?pipeline_tag=audio-classification) Modell auf dem Model Hub für Emotionserkennung und laden Sie es in die [`pipeline`]: ```py >>> from transformers import pipeline >>> audio_classifier = pipeline( ... task="audio-classification", model="ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition" ... ) ``` Übergeben Sie die Audiodatei an die [`pipeline`]: ```py >>> preds = audio_classifier(audio_file) >>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds] >>> preds [{'score': 0.1315, 'label': 'calm'}, {'score': 0.1307, 'label': 'neutral'}, {'score': 0.1274, 'label': 'sad'}, {'score': 0.1261, 'label': 'fearful'}, {'score': 0.1242, 'label': 'happy'}] ``` ## Bildverarbeitungs-Pipeline Die Verwendung einer [`pipeline`] für Bildverarbeitungsaufgaben ist praktisch identisch. Geben Sie Ihre Aufgabe an und übergeben Sie Ihr Bild an den Klassifikator. Das Bild kann ein Link oder ein lokaler Pfad zu dem Bild sein. Zum Beispiel: Welche Katzenart ist unten abgebildet? ![pipeline-cat-chonk](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg) ```py >>> from transformers import pipeline >>> vision_classifier = pipeline(task="image-classification") >>> preds = vision_classifier( ... images="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" ... 
) >>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds] >>> preds [{'score': 0.4335, 'label': 'lynx, catamount'}, {'score': 0.0348, 'label': 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor'}, {'score': 0.0324, 'label': 'snow leopard, ounce, Panthera uncia'}, {'score': 0.0239, 'label': 'Egyptian cat'}, {'score': 0.0229, 'label': 'tiger cat'}] ``` ## Multimodale Pipeline Die [`pipeline`] unterstützt mehr als eine Modalität. Eine Aufgabe zur Beantwortung visueller Fragen (VQA) kombiniert zum Beispiel Text und Bild. Verwenden Sie einen beliebigen Bildlink und eine Frage, die Sie zu dem Bild stellen möchten. Das Bild kann eine URL oder ein lokaler Pfad zu dem Bild sein. Wenn Sie zum Beispiel das gleiche Bild wie in der obigen Vision-Pipeline verwenden: ```py >>> image = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" >>> question = "Where is the cat?" ``` Erstellen Sie eine Pipeline für "vqa" und übergeben Sie ihr das Bild und die Frage: ```py >>> from transformers import pipeline >>> vqa = pipeline(task="vqa") >>> preds = vqa(image=image, question=question) >>> preds = [{"score": round(pred["score"], 4), "answer": pred["answer"]} for pred in preds] >>> preds [{'score': 0.9112, 'answer': 'snow'}, {'score': 0.8796, 'answer': 'in snow'}, {'score': 0.6717, 'answer': 'outside'}, {'score': 0.0291, 'answer': 'on ground'}, {'score': 0.027, 'answer': 'ground'}] ```
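The same [`pipeline`] calls shown above also accept a `device` argument and can batch over a list of inputs. A minimal sketch (the model name and generation parameters are illustrative, and a CUDA device is assumed):

```py
from transformers import pipeline

# Text-generation pipeline on GPU 0 (assumes a CUDA device is available).
generator = pipeline(task="text-generation", model="distilbert/distilgpt2", device=0)

prompts = [
    "Three Rings for the Elven-kings under the sky",
    "Nine for Mortal Men, doomed to die",
]

# Process the prompts in batches; outputs come back in the same order as the inputs.
outputs = generator(prompts, batch_size=2, max_new_tokens=20)
for result in outputs:
    print(result[0]["generated_text"])
```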
transformers/docs/source/de/pipeline_tutorial.md/0
{ "file_path": "transformers/docs/source/de/pipeline_tutorial.md", "repo_id": "transformers", "token_count": 3003 }
251
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Load pretrained instances with an AutoClass With so many different Transformer architectures, it can be challenging to create one for your checkpoint. As a part of 🤗 Transformers core philosophy to make the library easy, simple and flexible to use, an `AutoClass` automatically infers and loads the correct architecture from a given checkpoint. The `from_pretrained()` method lets you quickly load a pretrained model for any architecture so you don't have to devote time and resources to train a model from scratch. Producing this type of checkpoint-agnostic code means if your code works for one checkpoint, it will work with another checkpoint - as long as it was trained for a similar task - even if the architecture is different. <Tip> Remember, architecture refers to the skeleton of the model and checkpoints are the weights for a given architecture. For example, [BERT](https://huggingface.co/google-bert/bert-base-uncased) is an architecture, while `google-bert/bert-base-uncased` is a checkpoint. Model is a general term that can mean either architecture or checkpoint. </Tip> In this tutorial, learn to: * Load a pretrained tokenizer. * Load a pretrained image processor * Load a pretrained feature extractor. * Load a pretrained processor. * Load a pretrained model. * Load a model as a backbone. ## AutoTokenizer Nearly every NLP task begins with a tokenizer. A tokenizer converts your input into a format that can be processed by the model. Load a tokenizer with [`AutoTokenizer.from_pretrained`]: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased") ``` Then tokenize your input as shown below: ```py >>> sequence = "In a hole in the ground there lived a hobbit." >>> print(tokenizer(sequence)) {'input_ids': [101, 1999, 1037, 4920, 1999, 1996, 2598, 2045, 2973, 1037, 7570, 10322, 4183, 1012, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} ``` ## AutoImageProcessor For vision tasks, an image processor processes the image into the correct input format. ```py >>> from transformers import AutoImageProcessor >>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224") ``` ## AutoBackbone <div style="text-align: center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/Swin%20Stages.png"> <figcaption class="mt-2 text-center text-sm text-gray-500">A Swin backbone with multiple stages for outputting a feature map.</figcaption> </div> The [`AutoBackbone`] lets you use pretrained models as backbones to get feature maps from different stages of the backbone. 
You should specify one of the following parameters in [`~PretrainedConfig.from_pretrained`]:

* `out_indices` is the index of the layer you'd like to get the feature map from
* `out_features` is the name of the layer you'd like to get the feature map from

These parameters can be used interchangeably, but if you use both, make sure they're aligned with each other! If you don't pass any of these parameters, the backbone returns the feature map from the last layer.

<div style="text-align: center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/Swin%20Stage%201.png">
    <figcaption class="mt-2 text-center text-sm text-gray-500">A feature map from the first stage of the backbone. The patch partition refers to the model stem.</figcaption>
</div>

For example, in the above diagram, to return the feature map from the first stage of the Swin backbone, you can set `out_indices=(1,)`:

```py
>>> from transformers import AutoImageProcessor, AutoBackbone
>>> import torch
>>> from PIL import Image
>>> import requests

>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> processor = AutoImageProcessor.from_pretrained("microsoft/swin-tiny-patch4-window7-224")
>>> model = AutoBackbone.from_pretrained("microsoft/swin-tiny-patch4-window7-224", out_indices=(1,))

>>> inputs = processor(image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> feature_maps = outputs.feature_maps
```

Now you can access the `feature_maps` object from the first stage of the backbone:

```py
>>> list(feature_maps[0].shape)
[1, 96, 56, 56]
```

## AutoFeatureExtractor

For audio tasks, a feature extractor processes the audio signal into the correct input format.

Load a feature extractor with [`AutoFeatureExtractor.from_pretrained`]:

```py
>>> from transformers import AutoFeatureExtractor

>>> feature_extractor = AutoFeatureExtractor.from_pretrained(
...     "ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition"
... )
```

## AutoProcessor

Multimodal tasks require a processor that combines two types of preprocessing tools. For example, the [LayoutLMV2](model_doc/layoutlmv2) model requires an image processor to handle images and a tokenizer to handle text; a processor combines both of them.

Load a processor with [`AutoProcessor.from_pretrained`]:

```py
>>> from transformers import AutoProcessor

>>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv2-base-uncased")
```

## AutoModel

<frameworkcontent>
<pt>
The `AutoModelFor` classes let you load a pretrained model for a given task (see [here](model_doc/auto) for a complete list of available tasks). For example, load a model for sequence classification with [`AutoModelForSequenceClassification.from_pretrained`]:

```py
>>> from transformers import AutoModelForSequenceClassification

>>> model = AutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased")
```

Easily reuse the same checkpoint to load an architecture for a different task:

```py
>>> from transformers import AutoModelForTokenClassification

>>> model = AutoModelForTokenClassification.from_pretrained("distilbert/distilbert-base-uncased")
```

<Tip warning={true}>

For PyTorch models, the `from_pretrained()` method uses `torch.load()` which internally uses `pickle` and is known to be insecure. In general, never load a model that could have come from an untrusted source, or that could have been tampered with.
This security risk is partially mitigated for public models hosted on the Hugging Face Hub, which are [scanned for malware](https://huggingface.co/docs/hub/security-malware) at each commit. See the [Hub documentation](https://huggingface.co/docs/hub/security) for best practices like [signed commit verification](https://huggingface.co/docs/hub/security-gpg#signing-commits-with-gpg) with GPG. TensorFlow and Flax checkpoints are not affected, and can be loaded within PyTorch architectures using the `from_tf` and `from_flax` kwargs for the `from_pretrained` method to circumvent this issue. </Tip> Generally, we recommend using the `AutoTokenizer` class and the `AutoModelFor` class to load pretrained instances of models. This will ensure you load the correct architecture every time. In the next [tutorial](preprocessing), learn how to use your newly loaded tokenizer, image processor, feature extractor and processor to preprocess a dataset for fine-tuning. </pt> <tf> Finally, the `TFAutoModelFor` classes let you load a pretrained model for a given task (see [here](model_doc/auto) for a complete list of available tasks). For example, load a model for sequence classification with [`TFAutoModelForSequenceClassification.from_pretrained`]: ```py >>> from transformers import TFAutoModelForSequenceClassification >>> model = TFAutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` Easily reuse the same checkpoint to load an architecture for a different task: ```py >>> from transformers import TFAutoModelForTokenClassification >>> model = TFAutoModelForTokenClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` Generally, we recommend using the `AutoTokenizer` class and the `TFAutoModelFor` class to load pretrained instances of models. This will ensure you load the correct architecture every time. In the next [tutorial](preprocessing), learn how to use your newly loaded tokenizer, image processor, feature extractor and processor to preprocess a dataset for fine-tuning. </tf> </frameworkcontent>
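As a small, self-contained illustration of how these pieces fit together, the sketch below loads a tokenizer and a sequence classification model with the Auto classes and runs a single prediction; the checkpoint name is an assumption for illustration, and any sequence classification checkpoint on the Hub works the same way.

```py
# A minimal end-to-end sketch tying the Auto classes together (PyTorch).
# The checkpoint name is illustrative; any sequence classification checkpoint works similarly.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

checkpoint = "distilbert/distilbert-base-uncased-finetuned-sst-2-english"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSequenceClassification.from_pretrained(checkpoint)

inputs = tokenizer("In a hole in the ground there lived a hobbit.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

predicted_class_id = logits.argmax(dim=-1).item()
print(model.config.id2label[predicted_class_id])
```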
transformers/docs/source/en/autoclass_tutorial.md/0
{ "file_path": "transformers/docs/source/en/autoclass_tutorial.md", "repo_id": "transformers", "token_count": 2553 }
252
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Backbone

A backbone is a model used for feature extraction for higher-level computer vision tasks such as object detection and image classification. Transformers provides an [`AutoBackbone`] class for initializing a Transformers backbone from pretrained model weights, and two utility classes:

* [`~utils.BackboneMixin`] enables initializing a backbone from Transformers or [timm](https://hf.co/docs/timm/index) and includes functions for returning the output features and indices.
* [`~utils.BackboneConfigMixin`] sets the output features and indices of the backbone configuration.

[timm](https://hf.co/docs/timm/index) models are loaded with the [`TimmBackbone`] and [`TimmBackboneConfig`] classes.

Backbones are supported for the following models:

* [BEiT](../model_doc/beit)
* [BiT](../model_doc/bit)
* [ConvNeXt](../model_doc/convnext)
* [ConvNeXtV2](../model_doc/convnextv2)
* [DiNAT](../model_doc/dinat)
* [DINOv2](../model_doc/dinov2)
* [FocalNet](../model_doc/focalnet)
* [MaskFormer](../model_doc/maskformer)
* [NAT](../model_doc/nat)
* [ResNet](../model_doc/resnet)
* [Swin Transformer](../model_doc/swin)
* [Swin Transformer v2](../model_doc/swinv2)
* [ViTDet](../model_doc/vitdet)

## AutoBackbone

[[autodoc]] AutoBackbone

## BackboneMixin

[[autodoc]] utils.BackboneMixin

## BackboneConfigMixin

[[autodoc]] utils.BackboneConfigMixin

## TimmBackbone

[[autodoc]] models.timm_backbone.TimmBackbone

## TimmBackboneConfig

[[autodoc]] models.timm_backbone.TimmBackboneConfig
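For a quick sense of what a backbone returns, the sketch below loads a ResNet backbone and prints the shape of each requested feature map; the checkpoint and stage indices are assumptions for illustration.

```py
# Illustrative usage of AutoBackbone (checkpoint and stage indices are assumptions).
import torch
from transformers import AutoBackbone

backbone = AutoBackbone.from_pretrained("microsoft/resnet-50", out_indices=(2, 4))

pixel_values = torch.randn(1, 3, 224, 224)  # a dummy batch with one RGB image
outputs = backbone(pixel_values)

# One feature map per requested stage, e.g. for an object detection neck.
for feature_map in outputs.feature_maps:
    print(tuple(feature_map.shape))
```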
transformers/docs/source/en/main_classes/backbones.md/0
{ "file_path": "transformers/docs/source/en/main_classes/backbones.md", "repo_id": "transformers", "token_count": 689 }
253
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Generation

Each framework has a generate method for text generation implemented in its respective `GenerationMixin` class:

- PyTorch [`~generation.GenerationMixin.generate`] is implemented in [`~generation.GenerationMixin`].
- TensorFlow [`~generation.TFGenerationMixin.generate`] is implemented in [`~generation.TFGenerationMixin`].
- Flax/JAX [`~generation.FlaxGenerationMixin.generate`] is implemented in [`~generation.FlaxGenerationMixin`].

Regardless of your framework of choice, you can parameterize the generate method with a [`~generation.GenerationConfig`] class instance. Please refer to this class for the complete list of generation parameters, which control the behavior of the generation method.

To learn how to inspect a model's generation configuration, what the defaults are, how to change the parameters ad hoc, and how to create and save a customized generation configuration, refer to the [text generation strategies guide](../generation_strategies). The guide also explains how to use related features, like token streaming.

## GenerationConfig

[[autodoc]] generation.GenerationConfig
    - from_pretrained
    - from_model_config
    - save_pretrained
    - update
    - validate
    - get_generation_mode

## GenerationMixin

[[autodoc]] generation.GenerationMixin
    - generate
    - compute_transition_scores

## TFGenerationMixin

[[autodoc]] generation.TFGenerationMixin
    - generate
    - compute_transition_scores

## FlaxGenerationMixin

[[autodoc]] generation.FlaxGenerationMixin
    - generate
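To make the parameterization concrete, the sketch below builds a [`~generation.GenerationConfig`] and passes it to `generate()`; the checkpoint and sampling parameters are assumptions for illustration, and any causal LM checkpoint works similarly.

```py
# Illustrative sketch: drive generate() with a GenerationConfig.
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

checkpoint = "distilbert/distilgpt2"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint)

generation_config = GenerationConfig(
    max_new_tokens=30,
    do_sample=True,
    top_k=50,
    pad_token_id=tokenizer.eos_token_id,  # GPT-2-style models define no pad token
)

inputs = tokenizer("The Hugging Face library makes it easy to", return_tensors="pt")
outputs = model.generate(**inputs, generation_config=generation_config)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```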
transformers/docs/source/en/main_classes/text_generation.md/0
{ "file_path": "transformers/docs/source/en/main_classes/text_generation.md", "repo_id": "transformers", "token_count": 596 }
254
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # BERT <div class="flex flex-wrap space-x-1"> <a href="https://huggingface.co/models?filter=bert"> <img alt="Models" src="https://img.shields.io/badge/All_model_pages-bert-blueviolet"> </a> <a href="https://huggingface.co/spaces/docs-demos/bert-base-uncased"> <img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"> </a> </div> ## Overview The BERT model was proposed in [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805) by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova. It's a bidirectional transformer pretrained using a combination of masked language modeling objective and next sentence prediction on a large corpus comprising the Toronto Book Corpus and Wikipedia. The abstract from the paper is the following: *We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications.* *BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to 80.5% (7.7% point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).* This model was contributed by [thomwolf](https://huggingface.co/thomwolf). The original code can be found [here](https://github.com/google-research/bert). ## Usage tips - BERT is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. - BERT was trained with the masked language modeling (MLM) and next sentence prediction (NSP) objectives. It is efficient at predicting masked tokens and at NLU in general, but is not optimal for text generation. 
- Corrupts the inputs by using random masking, more precisely, during pretraining, a given percentage of tokens (usually 15%) is masked by: * a special mask token with probability 0.8 * a random token different from the one masked with probability 0.1 * the same token with probability 0.1 - The model must predict the original sentence, but has a second objective: inputs are two sentences A and B (with a separation token in between). With probability 50%, the sentences are consecutive in the corpus, in the remaining 50% they are not related. The model has to predict if the sentences are consecutive or not. ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with BERT. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. <PipelineTag pipeline="text-classification"/> - A blog post on [BERT Text Classification in a different language](https://www.philschmid.de/bert-text-classification-in-a-different-language). - A notebook for [Finetuning BERT (and friends) for multi-label text classification](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/BERT/Fine_tuning_BERT_(and_friends)_for_multi_label_text_classification.ipynb). - A notebook on how to [Finetune BERT for multi-label classification using PyTorch](https://colab.research.google.com/github/abhimishra91/transformers-tutorials/blob/master/transformers_multi_label_classification.ipynb). 🌎 - A notebook on how to [warm-start an EncoderDecoder model with BERT for summarization](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/BERT2BERT_for_CNN_Dailymail.ipynb). - [`BertForSequenceClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification.ipynb). - [`TFBertForSequenceClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/text-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification-tf.ipynb). - [`FlaxBertForSequenceClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/flax/text-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification_flax.ipynb). - [Text classification task guide](../tasks/sequence_classification) <PipelineTag pipeline="token-classification"/> - A blog post on how to use [Hugging Face Transformers with Keras: Fine-tune a non-English BERT for Named Entity Recognition](https://www.philschmid.de/huggingface-transformers-keras-tf). - A notebook for [Finetuning BERT for named-entity recognition](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/BERT/Custom_Named_Entity_Recognition_with_BERT_only_first_wordpiece.ipynb) using only the first wordpiece of each word in the word label during tokenization. To propagate the label of the word to all wordpieces, see this [version](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/BERT/Custom_Named_Entity_Recognition_with_BERT.ipynb) of the notebook instead. 
- [`BertForTokenClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/token-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification.ipynb). - [`TFBertForTokenClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/token-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification-tf.ipynb). - [`FlaxBertForTokenClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/flax/token-classification). - [Token classification](https://huggingface.co/course/chapter7/2?fw=pt) chapter of the 🤗 Hugging Face Course. - [Token classification task guide](../tasks/token_classification) <PipelineTag pipeline="fill-mask"/> - [`BertForMaskedLM`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling#robertabertdistilbert-and-masked-language-modeling) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb). - [`TFBertForMaskedLM`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/language-modeling#run_mlmpy) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb). - [`FlaxBertForMaskedLM`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/flax/language-modeling#masked-language-modeling) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/masked_language_modeling_flax.ipynb). - [Masked language modeling](https://huggingface.co/course/chapter7/3?fw=pt) chapter of the 🤗 Hugging Face Course. - [Masked language modeling task guide](../tasks/masked_language_modeling) <PipelineTag pipeline="question-answering"/> - [`BertForQuestionAnswering`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/question-answering) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering.ipynb). - [`TFBertForQuestionAnswering`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/question-answering) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering-tf.ipynb). - [`FlaxBertForQuestionAnswering`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/flax/question-answering). - [Question answering](https://huggingface.co/course/chapter7/7?fw=pt) chapter of the 🤗 Hugging Face Course. - [Question answering task guide](../tasks/question_answering) **Multiple choice** - [`BertForMultipleChoice`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/multiple-choice) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multiple_choice.ipynb). 
- [`TFBertForMultipleChoice`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/multiple-choice) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multiple_choice-tf.ipynb). - [Multiple choice task guide](../tasks/multiple_choice) ⚡️ **Inference** - A blog post on how to [Accelerate BERT inference with Hugging Face Transformers and AWS Inferentia](https://huggingface.co/blog/bert-inferentia-sagemaker). - A blog post on how to [Accelerate BERT inference with DeepSpeed-Inference on GPUs](https://www.philschmid.de/bert-deepspeed-inference). ⚙️ **Pretraining** - A blog post on [Pre-Training BERT with Hugging Face Transformers and Habana Gaudi](https://www.philschmid.de/pre-training-bert-habana). 🚀 **Deploy** - A blog post on how to [Convert Transformers to ONNX with Hugging Face Optimum](https://www.philschmid.de/convert-transformers-to-onnx). - A blog post on how to [Setup Deep Learning environment for Hugging Face Transformers with Habana Gaudi on AWS](https://www.philschmid.de/getting-started-habana-gaudi#conclusion). - A blog post on [Autoscaling BERT with Hugging Face Transformers, Amazon SageMaker and Terraform module](https://www.philschmid.de/terraform-huggingface-amazon-sagemaker-advanced). - A blog post on [Serverless BERT with HuggingFace, AWS Lambda, and Docker](https://www.philschmid.de/serverless-bert-with-huggingface-aws-lambda-docker). - A blog post on [Hugging Face Transformers BERT fine-tuning using Amazon SageMaker and Training Compiler](https://www.philschmid.de/huggingface-amazon-sagemaker-training-compiler). - A blog post on [Task-specific knowledge distillation for BERT using Transformers & Amazon SageMaker](https://www.philschmid.de/knowledge-distillation-bert-transformers). 
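Before moving on to the API reference, the masked language modeling objective described in the usage tips can be tried in a few lines. The snippet below is a minimal sketch, assuming the `bert-base-uncased` checkpoint (any BERT checkpoint with an MLM head works the same way); the exact prediction can differ between checkpoints.

```python
>>> import torch
>>> from transformers import AutoTokenizer, BertForMaskedLM

>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
>>> model = BertForMaskedLM.from_pretrained("bert-base-uncased")

>>> inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
>>> with torch.no_grad():
...     logits = model(**inputs).logits

>>> # index of the [MASK] token and its most likely replacement
>>> mask_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0]
>>> predicted_id = logits[0, mask_index].argmax(dim=-1)
>>> tokenizer.decode(predicted_id)
'paris'
```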
## BertConfig

[[autodoc]] BertConfig
    - all

## BertTokenizer

[[autodoc]] BertTokenizer
    - build_inputs_with_special_tokens
    - get_special_tokens_mask
    - create_token_type_ids_from_sequences
    - save_vocabulary

<frameworkcontent>
<pt>

## BertTokenizerFast

[[autodoc]] BertTokenizerFast

</pt>
<tf>

## TFBertTokenizer

[[autodoc]] TFBertTokenizer

</tf>
</frameworkcontent>

## Bert specific outputs

[[autodoc]] models.bert.modeling_bert.BertForPreTrainingOutput

[[autodoc]] models.bert.modeling_tf_bert.TFBertForPreTrainingOutput

[[autodoc]] models.bert.modeling_flax_bert.FlaxBertForPreTrainingOutput

<frameworkcontent>
<pt>

## BertModel

[[autodoc]] BertModel
    - forward

## BertForPreTraining

[[autodoc]] BertForPreTraining
    - forward

## BertLMHeadModel

[[autodoc]] BertLMHeadModel
    - forward

## BertForMaskedLM

[[autodoc]] BertForMaskedLM
    - forward

## BertForNextSentencePrediction

[[autodoc]] BertForNextSentencePrediction
    - forward

## BertForSequenceClassification

[[autodoc]] BertForSequenceClassification
    - forward

## BertForMultipleChoice

[[autodoc]] BertForMultipleChoice
    - forward

## BertForTokenClassification

[[autodoc]] BertForTokenClassification
    - forward

## BertForQuestionAnswering

[[autodoc]] BertForQuestionAnswering
    - forward

</pt>
<tf>

## TFBertModel

[[autodoc]] TFBertModel
    - call

## TFBertForPreTraining

[[autodoc]] TFBertForPreTraining
    - call

## TFBertLMHeadModel

[[autodoc]] TFBertLMHeadModel
    - call

## TFBertForMaskedLM

[[autodoc]] TFBertForMaskedLM
    - call

## TFBertForNextSentencePrediction

[[autodoc]] TFBertForNextSentencePrediction
    - call

## TFBertForSequenceClassification

[[autodoc]] TFBertForSequenceClassification
    - call

## TFBertForMultipleChoice

[[autodoc]] TFBertForMultipleChoice
    - call

## TFBertForTokenClassification

[[autodoc]] TFBertForTokenClassification
    - call

## TFBertForQuestionAnswering

[[autodoc]] TFBertForQuestionAnswering
    - call

</tf>
<jax>

## FlaxBertModel

[[autodoc]] FlaxBertModel
    - __call__

## FlaxBertForPreTraining

[[autodoc]] FlaxBertForPreTraining
    - __call__

## FlaxBertForCausalLM

[[autodoc]] FlaxBertForCausalLM
    - __call__

## FlaxBertForMaskedLM

[[autodoc]] FlaxBertForMaskedLM
    - __call__

## FlaxBertForNextSentencePrediction

[[autodoc]] FlaxBertForNextSentencePrediction
    - __call__

## FlaxBertForSequenceClassification

[[autodoc]] FlaxBertForSequenceClassification
    - __call__

## FlaxBertForMultipleChoice

[[autodoc]] FlaxBertForMultipleChoice
    - __call__

## FlaxBertForTokenClassification

[[autodoc]] FlaxBertForTokenClassification
    - __call__

## FlaxBertForQuestionAnswering

[[autodoc]] FlaxBertForQuestionAnswering
    - __call__

</jax>
</frameworkcontent>
transformers/docs/source/en/model_doc/bert.md/0
{ "file_path": "transformers/docs/source/en/model_doc/bert.md", "repo_id": "transformers", "token_count": 4645 }
255
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # CANINE ## Overview The CANINE model was proposed in [CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation](https://arxiv.org/abs/2103.06874) by Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting. It's among the first papers that trains a Transformer without using an explicit tokenization step (such as Byte Pair Encoding (BPE), WordPiece or SentencePiece). Instead, the model is trained directly at a Unicode character-level. Training at a character-level inevitably comes with a longer sequence length, which CANINE solves with an efficient downsampling strategy, before applying a deep Transformer encoder. The abstract from the paper is the following: *Pipelined NLP systems have largely been superseded by end-to-end neural modeling, yet nearly all commonly-used models still require an explicit tokenization step. While recent tokenization approaches based on data-derived subword lexicons are less brittle than manually engineered tokenizers, these techniques are not equally suited to all languages, and the use of any fixed vocabulary may limit a model's ability to adapt. In this paper, we present CANINE, a neural encoder that operates directly on character sequences, without explicit tokenization or vocabulary, and a pre-training strategy that operates either directly on characters or optionally uses subwords as a soft inductive bias. To use its finer-grained input effectively and efficiently, CANINE combines downsampling, which reduces the input sequence length, with a deep transformer stack, which encodes context. CANINE outperforms a comparable mBERT model by 2.8 F1 on TyDi QA, a challenging multilingual benchmark, despite having 28% fewer model parameters.* This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/google-research/language/tree/master/language/canine). ## Usage tips - CANINE uses no less than 3 Transformer encoders internally: 2 "shallow" encoders (which only consist of a single layer) and 1 "deep" encoder (which is a regular BERT encoder). First, a "shallow" encoder is used to contextualize the character embeddings, using local attention. Next, after downsampling, a "deep" encoder is applied. Finally, after upsampling, a "shallow" encoder is used to create the final character embeddings. Details regarding up- and downsampling can be found in the paper. - CANINE uses a max sequence length of 2048 characters by default. One can use [`CanineTokenizer`] to prepare text for the model. - Classification can be done by placing a linear layer on top of the final hidden state of the special [CLS] token (which has a predefined Unicode code point). 
For token classification tasks however, the downsampled sequence of tokens needs to be upsampled again to match the length of the original character sequence (which is 2048). The details for this can be found in the paper. Model checkpoints: - [google/canine-c](https://huggingface.co/google/canine-c): Pre-trained with autoregressive character loss, 12-layer, 768-hidden, 12-heads, 121M parameters (size ~500 MB). - [google/canine-s](https://huggingface.co/google/canine-s): Pre-trained with subword loss, 12-layer, 768-hidden, 12-heads, 121M parameters (size ~500 MB). ## Usage example CANINE works on raw characters, so it can be used **without a tokenizer**: ```python >>> from transformers import CanineModel >>> import torch >>> model = CanineModel.from_pretrained("google/canine-c") # model pre-trained with autoregressive character loss >>> text = "hello world" >>> # use Python's built-in ord() function to turn each character into its unicode code point id >>> input_ids = torch.tensor([[ord(char) for char in text]]) >>> outputs = model(input_ids) # forward pass >>> pooled_output = outputs.pooler_output >>> sequence_output = outputs.last_hidden_state ``` For batched inference and training, it is however recommended to make use of the tokenizer (to pad/truncate all sequences to the same length): ```python >>> from transformers import CanineTokenizer, CanineModel >>> model = CanineModel.from_pretrained("google/canine-c") >>> tokenizer = CanineTokenizer.from_pretrained("google/canine-c") >>> inputs = ["Life is like a box of chocolates.", "You never know what you gonna get."] >>> encoding = tokenizer(inputs, padding="longest", truncation=True, return_tensors="pt") >>> outputs = model(**encoding) # forward pass >>> pooled_output = outputs.pooler_output >>> sequence_output = outputs.last_hidden_state ``` ## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) - [Question answering task guide](../tasks/question_answering) - [Multiple choice task guide](../tasks/multiple_choice) ## CanineConfig [[autodoc]] CanineConfig ## CanineTokenizer [[autodoc]] CanineTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences ## CANINE specific outputs [[autodoc]] models.canine.modeling_canine.CanineModelOutputWithPooling ## CanineModel [[autodoc]] CanineModel - forward ## CanineForSequenceClassification [[autodoc]] CanineForSequenceClassification - forward ## CanineForMultipleChoice [[autodoc]] CanineForMultipleChoice - forward ## CanineForTokenClassification [[autodoc]] CanineForTokenClassification - forward ## CanineForQuestionAnswering [[autodoc]] CanineForQuestionAnswering - forward
transformers/docs/source/en/model_doc/canine.md/0
{ "file_path": "transformers/docs/source/en/model_doc/canine.md", "repo_id": "transformers", "token_count": 1723 }
256
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Convolutional Vision Transformer (CvT) ## Overview The CvT model was proposed in [CvT: Introducing Convolutions to Vision Transformers](https://arxiv.org/abs/2103.15808) by Haiping Wu, Bin Xiao, Noel Codella, Mengchen Liu, Xiyang Dai, Lu Yuan and Lei Zhang. The Convolutional vision Transformer (CvT) improves the [Vision Transformer (ViT)](vit) in performance and efficiency by introducing convolutions into ViT to yield the best of both designs. The abstract from the paper is the following: *We present in this paper a new architecture, named Convolutional vision Transformer (CvT), that improves Vision Transformer (ViT) in performance and efficiency by introducing convolutions into ViT to yield the best of both designs. This is accomplished through two primary modifications: a hierarchy of Transformers containing a new convolutional token embedding, and a convolutional Transformer block leveraging a convolutional projection. These changes introduce desirable properties of convolutional neural networks (CNNs) to the ViT architecture (\ie shift, scale, and distortion invariance) while maintaining the merits of Transformers (\ie dynamic attention, global context, and better generalization). We validate CvT by conducting extensive experiments, showing that this approach achieves state-of-the-art performance over other Vision Transformers and ResNets on ImageNet-1k, with fewer parameters and lower FLOPs. In addition, performance gains are maintained when pretrained on larger datasets (\eg ImageNet-22k) and fine-tuned to downstream tasks. Pre-trained on ImageNet-22k, our CvT-W24 obtains a top-1 accuracy of 87.7\% on the ImageNet-1k val set. Finally, our results show that the positional encoding, a crucial component in existing Vision Transformers, can be safely removed in our model, simplifying the design for higher resolution vision tasks.* This model was contributed by [anugunj](https://huggingface.co/anugunj). The original code can be found [here](https://github.com/microsoft/CvT). ## Usage tips - CvT models are regular Vision Transformers, but trained with convolutions. They outperform the [original model (ViT)](vit) when fine-tuned on ImageNet-1K and CIFAR-100. - You can check out demo notebooks regarding inference as well as fine-tuning on custom data [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/VisionTransformer) (you can just replace [`ViTFeatureExtractor`] by [`AutoImageProcessor`] and [`ViTForImageClassification`] by [`CvtForImageClassification`]). 
- The available checkpoints are either (1) pre-trained on [ImageNet-22k](http://www.image-net.org/) (a collection of 14 million images and 22k classes) only, (2) also fine-tuned on ImageNet-22k or (3) also fine-tuned on [ImageNet-1k](http://www.image-net.org/challenges/LSVRC/2012/) (also referred to as ILSVRC 2012, a collection of 1.3 million images and 1,000 classes). ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with CvT. <PipelineTag pipeline="image-classification"/> - [`CvtForImageClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb). - See also: [Image classification task guide](../tasks/image_classification) If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. ## CvtConfig [[autodoc]] CvtConfig <frameworkcontent> <pt> ## CvtModel [[autodoc]] CvtModel - forward ## CvtForImageClassification [[autodoc]] CvtForImageClassification - forward </pt> <tf> ## TFCvtModel [[autodoc]] TFCvtModel - call ## TFCvtForImageClassification [[autodoc]] TFCvtForImageClassification - call </tf> </frameworkcontent>
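As noted in the usage tips above, CvT checkpoints plug directly into the standard image classification API. The snippet below is a minimal inference sketch, assuming the `microsoft/cvt-13` checkpoint (fine-tuned on ImageNet-1k); any other fine-tuned CvT checkpoint can be substituted.

```python
>>> import requests
>>> import torch
>>> from PIL import Image
>>> from transformers import AutoImageProcessor, CvtForImageClassification

>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)

>>> image_processor = AutoImageProcessor.from_pretrained("microsoft/cvt-13")
>>> model = CvtForImageClassification.from_pretrained("microsoft/cvt-13")

>>> inputs = image_processor(images=image, return_tensors="pt")
>>> with torch.no_grad():
...     logits = model(**inputs).logits

>>> # the model predicts one of the 1,000 ImageNet-1k classes
>>> predicted_label = logits.argmax(-1).item()
>>> print(model.config.id2label[predicted_label])
```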
transformers/docs/source/en/model_doc/cvt.md/0
{ "file_path": "transformers/docs/source/en/model_doc/cvt.md", "repo_id": "transformers", "token_count": 1314 }
257
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # FLAVA ## Overview The FLAVA model was proposed in [FLAVA: A Foundational Language And Vision Alignment Model](https://arxiv.org/abs/2112.04482) by Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela and is accepted at CVPR 2022. The paper aims at creating a single unified foundation model which can work across vision, language as well as vision-and-language multimodal tasks. The abstract from the paper is the following: *State-of-the-art vision and vision-and-language models rely on large-scale visio-linguistic pretraining for obtaining good performance on a variety of downstream tasks. Generally, such models are often either cross-modal (contrastive) or multi-modal (with earlier fusion) but not both; and they often only target specific modalities or tasks. A promising direction would be to use a single holistic universal model, as a "foundation", that targets all modalities at once -- a true vision and language foundation model should be good at vision tasks, language tasks, and cross- and multi-modal vision and language tasks. We introduce FLAVA as such a model and demonstrate impressive performance on a wide range of 35 tasks spanning these target modalities.* This model was contributed by [aps](https://huggingface.co/aps). The original code can be found [here](https://github.com/facebookresearch/multimodal/tree/main/examples/flava). ## FlavaConfig [[autodoc]] FlavaConfig ## FlavaTextConfig [[autodoc]] FlavaTextConfig ## FlavaImageConfig [[autodoc]] FlavaImageConfig ## FlavaMultimodalConfig [[autodoc]] FlavaMultimodalConfig ## FlavaImageCodebookConfig [[autodoc]] FlavaImageCodebookConfig ## FlavaProcessor [[autodoc]] FlavaProcessor ## FlavaFeatureExtractor [[autodoc]] FlavaFeatureExtractor ## FlavaImageProcessor [[autodoc]] FlavaImageProcessor - preprocess ## FlavaForPreTraining [[autodoc]] FlavaForPreTraining - forward ## FlavaModel [[autodoc]] FlavaModel - forward - get_text_features - get_image_features ## FlavaImageCodebook [[autodoc]] FlavaImageCodebook - forward - get_codebook_indices - get_codebook_probs ## FlavaTextModel [[autodoc]] FlavaTextModel - forward ## FlavaImageModel [[autodoc]] FlavaImageModel - forward ## FlavaMultimodalModel [[autodoc]] FlavaMultimodalModel - forward
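As a quick orientation for how the classes listed above fit together, here is a minimal sketch for extracting unimodal text and image embeddings. The `facebook/flava-full` checkpoint and the COCO sample image URL are only illustrative choices; refer to the API reference above for the exact output fields of each method.

```python
>>> import requests
>>> import torch
>>> from PIL import Image
>>> from transformers import FlavaProcessor, FlavaModel

>>> processor = FlavaProcessor.from_pretrained("facebook/flava-full")
>>> model = FlavaModel.from_pretrained("facebook/flava-full")

>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> text = ["a photo of two cats lying on a couch"]

>>> inputs = processor(text=text, images=[image], return_tensors="pt", padding=True)

>>> with torch.no_grad():
...     # unimodal embeddings from the text and image encoders
...     text_features = model.get_text_features(input_ids=inputs.input_ids, attention_mask=inputs.attention_mask)
...     image_features = model.get_image_features(pixel_values=inputs.pixel_values)
```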
transformers/docs/source/en/model_doc/flava.md/0
{ "file_path": "transformers/docs/source/en/model_doc/flava.md", "repo_id": "transformers", "token_count": 916 }
258
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # GPTSAN-japanese ## Overview The GPTSAN-japanese model was released in the repository by Toshiyuki Sakamoto (tanreinama). GPTSAN is a Japanese language model using Switch Transformer. It has the same structure as the model introduced as Prefix LM in the T5 paper, and support both Text Generation and Masked Language Modeling tasks. These basic tasks similarly can fine-tune for translation or summarization. ### Usage example The `generate()` method can be used to generate text using GPTSAN-Japanese model. ```python >>> from transformers import AutoModel, AutoTokenizer >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("Tanrei/GPTSAN-japanese") >>> model = AutoModel.from_pretrained("Tanrei/GPTSAN-japanese").cuda() >>> x_tok = tokenizer("は、", prefix_text="織田信長", return_tensors="pt") >>> torch.manual_seed(0) >>> gen_tok = model.generate(x_tok.input_ids.cuda(), token_type_ids=x_tok.token_type_ids.cuda(), max_new_tokens=20) >>> tokenizer.decode(gen_tok[0]) '織田信長は、2004年に『戦国BASARA』のために、豊臣秀吉' ``` ## GPTSAN Features GPTSAN has some unique features. It has a model structure of Prefix-LM. It works as a shifted Masked Language Model for Prefix Input tokens. Un-prefixed inputs behave like normal generative models. The Spout vector is a GPTSAN specific input. Spout is pre-trained with random inputs, but you can specify a class of text or an arbitrary vector during fine-tuning. This allows you to indicate the tendency of the generated text. GPTSAN has a sparse Feed Forward based on Switch-Transformer. You can also add other layers and train them partially. See the original GPTSAN repository for details. ### Prefix-LM Model GPTSAN has the structure of the model named Prefix-LM in the `T5` paper. (The original GPTSAN repository calls it `hybrid`) In GPTSAN, the `Prefix` part of Prefix-LM, that is, the input position that can be referenced by both tokens, can be specified with any length. Arbitrary lengths can also be specified differently for each batch. This length applies to the text entered in `prefix_text` for the tokenizer. The tokenizer returns the mask of the `Prefix` part of Prefix-LM as `token_type_ids`. The model treats the part where `token_type_ids` is 1 as a `Prefix` part, that is, the input can refer to both tokens before and after. ## Usage tips Specifying the Prefix part is done with a mask passed to self-attention. 
When `token_type_ids` is `None` or all zeros, this is equivalent to a regular causal mask. For example:

```text
>>> x_token = tokenizer("アイウエ")
input_ids:      | SOT | SEG | ア | イ | ウ | エ |
token_type_ids: | 1   | 0   | 0  | 0  | 0  | 0  |
prefix_lm_mask:
SOT | 1 0 0 0 0 0 |
SEG | 1 1 0 0 0 0 |
ア  | 1 1 1 0 0 0 |
イ  | 1 1 1 1 0 0 |
ウ  | 1 1 1 1 1 0 |
エ  | 1 1 1 1 1 1 |

>>> x_token = tokenizer("", prefix_text="アイウエ")
input_ids:      | SOT | ア | イ | ウ | エ | SEG |
token_type_ids: | 1   | 1  | 1  | 1  | 1  | 0   |
prefix_lm_mask:
SOT | 1 1 1 1 1 0 |
ア  | 1 1 1 1 1 0 |
イ  | 1 1 1 1 1 0 |
ウ  | 1 1 1 1 1 0 |
エ  | 1 1 1 1 1 0 |
SEG | 1 1 1 1 1 1 |

>>> x_token = tokenizer("ウエ", prefix_text="アイ")
input_ids:      | SOT | ア | イ | SEG | ウ | エ |
token_type_ids: | 1   | 1  | 1  | 0   | 0  | 0  |
prefix_lm_mask:
SOT | 1 1 1 0 0 0 |
ア  | 1 1 1 0 0 0 |
イ  | 1 1 1 0 0 0 |
SEG | 1 1 1 1 0 0 |
ウ  | 1 1 1 1 1 0 |
エ  | 1 1 1 1 1 1 |
```

### Spout Vector

A Spout Vector is a special vector for controlling text generation. This vector is treated as the first embedding in self-attention to bring extraneous attention to the generated tokens. In the pre-trained model published from `Tanrei/GPTSAN-japanese`, the Spout Vector is a 128-dimensional vector that passes through 8 fully connected layers in the model and is projected into the space acting as external attention. The Spout Vector projected by the fully connected layer is split to be passed to all self-attentions.

## GPTSanJapaneseConfig

[[autodoc]] GPTSanJapaneseConfig

## GPTSanJapaneseTokenizer

[[autodoc]] GPTSanJapaneseTokenizer

## GPTSanJapaneseModel

[[autodoc]] GPTSanJapaneseModel

## GPTSanJapaneseForConditionalGeneration

[[autodoc]] GPTSanJapaneseForConditionalGeneration
    - forward
transformers/docs/source/en/model_doc/gptsan-japanese.md/0
{ "file_path": "transformers/docs/source/en/model_doc/gptsan-japanese.md", "repo_id": "transformers", "token_count": 1659 }
259
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # LED ## Overview The LED model was proposed in [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan. The abstract from the paper is the following: *Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length, making it easy to process documents of thousands of tokens or longer. Longformer's attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. Following prior work on long-sequence transformers, we evaluate Longformer on character-level language modeling and achieve state-of-the-art results on text8 and enwik8. In contrast to most prior work, we also pretrain Longformer and finetune it on a variety of downstream tasks. Our pretrained Longformer consistently outperforms RoBERTa on long document tasks and sets new state-of-the-art results on WikiHop and TriviaQA. We finally introduce the Longformer-Encoder-Decoder (LED), a Longformer variant for supporting long document generative sequence-to-sequence tasks, and demonstrate its effectiveness on the arXiv summarization dataset.* ## Usage tips - [`LEDForConditionalGeneration`] is an extension of [`BartForConditionalGeneration`] exchanging the traditional *self-attention* layer with *Longformer*'s *chunked self-attention* layer. [`LEDTokenizer`] is an alias of [`BartTokenizer`]. - LED works very well on long-range *sequence-to-sequence* tasks where the `input_ids` largely exceed a length of 1024 tokens. - LED pads the `input_ids` to be a multiple of `config.attention_window` if required. Therefore a small speed-up is gained, when [`LEDTokenizer`] is used with the `pad_to_multiple_of` argument. - LED makes use of *global attention* by means of the `global_attention_mask` (see [`LongformerModel`]). For summarization, it is advised to put *global attention* only on the first `<s>` token. For question answering, it is advised to put *global attention* on all tokens of the question. - To fine-tune LED on all 16384, *gradient checkpointing* can be enabled in case training leads to out-of-memory (OOM) errors. This can be done by executing `model.gradient_checkpointing_enable()`. Moreover, the `use_cache=False` flag can be used to disable the caching mechanism to save memory. - LED is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten). 
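The global attention advice above can be wired up explicitly. The snippet below is a minimal sketch, assuming the `allenai/led-base-16384` checkpoint; for meaningful summaries you would typically load a fine-tuned variant such as `allenai/led-large-16384-arxiv`.

```python
>>> import torch
>>> from transformers import AutoTokenizer, LEDForConditionalGeneration

>>> tokenizer = AutoTokenizer.from_pretrained("allenai/led-base-16384")
>>> model = LEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")

>>> long_document = "Replace me with a (very) long article to summarize."  # placeholder input
>>> inputs = tokenizer(long_document, return_tensors="pt")

>>> # global attention on the first token, local (windowed) attention everywhere else
>>> global_attention_mask = torch.zeros_like(inputs.input_ids)
>>> global_attention_mask[:, 0] = 1

>>> summary_ids = model.generate(
...     inputs.input_ids,
...     global_attention_mask=global_attention_mask,
...     max_new_tokens=64,
... )
>>> print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True)[0])
```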
## Resources - [A notebook showing how to evaluate LED](https://colab.research.google.com/drive/12INTTR6n64TzS4RrXZxMSXfrOd9Xzamo?usp=sharing). - [A notebook showing how to fine-tune LED](https://colab.research.google.com/drive/12LjJazBl7Gam0XBPy_y0CTOJZeZ34c2v?usp=sharing). - [Text classification task guide](../tasks/sequence_classification) - [Question answering task guide](../tasks/question_answering) - [Translation task guide](../tasks/translation) - [Summarization task guide](../tasks/summarization) ## LEDConfig [[autodoc]] LEDConfig ## LEDTokenizer [[autodoc]] LEDTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## LEDTokenizerFast [[autodoc]] LEDTokenizerFast ## LED specific outputs [[autodoc]] models.led.modeling_led.LEDEncoderBaseModelOutput [[autodoc]] models.led.modeling_led.LEDSeq2SeqModelOutput [[autodoc]] models.led.modeling_led.LEDSeq2SeqLMOutput [[autodoc]] models.led.modeling_led.LEDSeq2SeqSequenceClassifierOutput [[autodoc]] models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput [[autodoc]] models.led.modeling_tf_led.TFLEDEncoderBaseModelOutput [[autodoc]] models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput [[autodoc]] models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput <frameworkcontent> <pt> ## LEDModel [[autodoc]] LEDModel - forward ## LEDForConditionalGeneration [[autodoc]] LEDForConditionalGeneration - forward ## LEDForSequenceClassification [[autodoc]] LEDForSequenceClassification - forward ## LEDForQuestionAnswering [[autodoc]] LEDForQuestionAnswering - forward </pt> <tf> ## TFLEDModel [[autodoc]] TFLEDModel - call ## TFLEDForConditionalGeneration [[autodoc]] TFLEDForConditionalGeneration - call </tf> </frameworkcontent>
transformers/docs/source/en/model_doc/led.md/0
{ "file_path": "transformers/docs/source/en/model_doc/led.md", "repo_id": "transformers", "token_count": 1602 }
260
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Mask2Former ## Overview The Mask2Former model was proposed in [Masked-attention Mask Transformer for Universal Image Segmentation](https://arxiv.org/abs/2112.01527) by Bowen Cheng, Ishan Misra, Alexander G. Schwing, Alexander Kirillov, Rohit Girdhar. Mask2Former is a unified framework for panoptic, instance and semantic segmentation and features significant performance and efficiency improvements over [MaskFormer](maskformer). The abstract from the paper is the following: *Image segmentation groups pixels with different semantics, e.g., category or instance membership. Each choice of semantics defines a task. While only the semantics of each task differ, current research focuses on designing specialized architectures for each task. We present Masked-attention Mask Transformer (Mask2Former), a new architecture capable of addressing any image segmentation task (panoptic, instance or semantic). Its key components include masked attention, which extracts localized features by constraining cross-attention within predicted mask regions. In addition to reducing the research effort by at least three times, it outperforms the best specialized architectures by a significant margin on four popular datasets. Most notably, Mask2Former sets a new state-of-the-art for panoptic segmentation (57.8 PQ on COCO), instance segmentation (50.1 AP on COCO) and semantic segmentation (57.7 mIoU on ADE20K).* <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/mask2former_architecture.jpg" alt="drawing" width="600"/> <small> Mask2Former architecture. Taken from the <a href="https://arxiv.org/abs/2112.01527">original paper.</a> </small> This model was contributed by [Shivalika Singh](https://huggingface.co/shivi) and [Alara Dirik](https://huggingface.co/adirik). The original code can be found [here](https://github.com/facebookresearch/Mask2Former). ## Usage tips - Mask2Former uses the same preprocessing and postprocessing steps as [MaskFormer](maskformer). Use [`Mask2FormerImageProcessor`] or [`AutoImageProcessor`] to prepare images and optional targets for the model. - To get the final segmentation, depending on the task, you can call [`~Mask2FormerImageProcessor.post_process_semantic_segmentation`] or [`~Mask2FormerImageProcessor.post_process_instance_segmentation`] or [`~Mask2FormerImageProcessor.post_process_panoptic_segmentation`]. All three tasks can be solved using [`Mask2FormerForUniversalSegmentation`] output, panoptic segmentation accepts an optional `label_ids_to_fuse` argument to fuse instances of the target object/s (e.g. sky) together. ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with Mask2Former. 
- Demo notebooks regarding inference + fine-tuning Mask2Former on custom data can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/Mask2Former). If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we will review it. The resource should ideally demonstrate something new instead of duplicating an existing resource. ## Mask2FormerConfig [[autodoc]] Mask2FormerConfig ## MaskFormer specific outputs [[autodoc]] models.mask2former.modeling_mask2former.Mask2FormerModelOutput [[autodoc]] models.mask2former.modeling_mask2former.Mask2FormerForUniversalSegmentationOutput ## Mask2FormerModel [[autodoc]] Mask2FormerModel - forward ## Mask2FormerForUniversalSegmentation [[autodoc]] Mask2FormerForUniversalSegmentation - forward ## Mask2FormerImageProcessor [[autodoc]] Mask2FormerImageProcessor - preprocess - encode_inputs - post_process_semantic_segmentation - post_process_instance_segmentation - post_process_panoptic_segmentation
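To make the pre- and post-processing flow from the usage tips concrete, here is a minimal instance segmentation sketch. The checkpoint name is one of the publicly released Mask2Former checkpoints and is only an example; swap in a semantic or panoptic checkpoint together with the matching `post_process_*` call as needed.

```python
>>> import requests
>>> import torch
>>> from PIL import Image
>>> from transformers import AutoImageProcessor, Mask2FormerForUniversalSegmentation

>>> image_processor = AutoImageProcessor.from_pretrained("facebook/mask2former-swin-small-coco-instance")
>>> model = Mask2FormerForUniversalSegmentation.from_pretrained("facebook/mask2former-swin-small-coco-instance")

>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)

>>> inputs = image_processor(images=image, return_tensors="pt")
>>> with torch.no_grad():
...     outputs = model(**inputs)

>>> # resize the predicted masks back to the original image size (height, width)
>>> result = image_processor.post_process_instance_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
>>> segmentation_map = result["segmentation"]  # tensor of shape (height, width) with per-pixel instance ids
>>> segments_info = result["segments_info"]  # per-instance labels and scores
```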
transformers/docs/source/en/model_doc/mask2former.md/0
{ "file_path": "transformers/docs/source/en/model_doc/mask2former.md", "repo_id": "transformers", "token_count": 1219 }
261
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Open-Llama <Tip warning={true}> This model is in maintenance mode only, we don't accept any new PRs changing its code. If you run into any issues running this model, please reinstall the last version that supported this model: v4.31.0. You can do so by running the following command: `pip install -U transformers==4.31.0`. </Tip> <Tip warning={true}> This model differs from the [OpenLLaMA models](https://huggingface.co/models?search=openllama) on the Hugging Face Hub, which primarily use the [LLaMA](llama) architecture. </Tip> ## Overview The Open-Llama model was proposed in the open source Open-Llama project by community developer s-JoL. The model is mainly based on LLaMA with some modifications, incorporating memory-efficient attention from Xformers, stable embedding from Bloom, and shared input-output embedding from PaLM. And the model is pre-trained on both Chinese and English, which gives it better performance on Chinese language tasks. This model was contributed by [s-JoL](https://huggingface.co/s-JoL). The original code was released on GitHub by [s-JoL](https://github.com/s-JoL), but is now removed. ## OpenLlamaConfig [[autodoc]] OpenLlamaConfig ## OpenLlamaModel [[autodoc]] OpenLlamaModel - forward ## OpenLlamaForCausalLM [[autodoc]] OpenLlamaForCausalLM - forward ## OpenLlamaForSequenceClassification [[autodoc]] OpenLlamaForSequenceClassification - forward
transformers/docs/source/en/model_doc/open-llama.md/0
{ "file_path": "transformers/docs/source/en/model_doc/open-llama.md", "repo_id": "transformers", "token_count": 620 }
262
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Pop2Piano <div class="flex flex-wrap space-x-1"> <a href="https://huggingface.co/spaces/sweetcocoa/pop2piano"> <img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"> </a> </div> ## Overview The Pop2Piano model was proposed in [Pop2Piano : Pop Audio-based Piano Cover Generation](https://arxiv.org/abs/2211.00895) by Jongho Choi and Kyogu Lee. Piano covers of pop music are widely enjoyed, but generating them from music is not a trivial task. It requires great expertise with playing piano as well as knowing different characteristics and melodies of a song. With Pop2Piano you can directly generate a cover from a song's audio waveform. It is the first model to directly generate a piano cover from pop audio without melody and chord extraction modules. Pop2Piano is an encoder-decoder Transformer model based on [T5](https://arxiv.org/pdf/1910.10683.pdf). The input audio is transformed to its waveform and passed to the encoder, which transforms it to a latent representation. The decoder uses these latent representations to generate token ids in an autoregressive way. Each token id corresponds to one of four different token types: time, velocity, note and 'special'. The token ids are then decoded to their equivalent MIDI file. The abstract from the paper is the following: *Piano covers of pop music are enjoyed by many people. However, the task of automatically generating piano covers of pop music is still understudied. This is partly due to the lack of synchronized {Pop, Piano Cover} data pairs, which made it challenging to apply the latest data-intensive deep learning-based methods. To leverage the power of the data-driven approach, we make a large amount of paired and synchronized {Pop, Piano Cover} data using an automated pipeline. In this paper, we present Pop2Piano, a Transformer network that generates piano covers given waveforms of pop music. To the best of our knowledge, this is the first model to generate a piano cover directly from pop audio without using melody and chord extraction modules. We show that Pop2Piano, trained with our dataset, is capable of producing plausible piano covers.* This model was contributed by [Susnato Dhar](https://huggingface.co/susnato). The original code can be found [here](https://github.com/sweetcocoa/pop2piano). ## Usage tips * To use Pop2Piano, you will need to install the 🤗 Transformers library, as well as the following third party modules: ```bash pip install pretty-midi==0.2.9 essentia==2.1b6.dev1034 librosa scipy ``` Please note that you may need to restart your runtime after installation. * Pop2Piano is an Encoder-Decoder based model like T5. * Pop2Piano can be used to generate midi-audio files for a given audio sequence. * Choosing different composers in `Pop2PianoForConditionalGeneration.generate()` can lead to variety of different results. * Setting the sampling rate to 44.1 kHz when loading the audio file can give good performance. 
* Though Pop2Piano was mainly trained on Korean Pop music, it also does pretty well on other Western Pop or Hip Hop songs. ## Examples - Example using HuggingFace Dataset: ```python >>> from datasets import load_dataset >>> from transformers import Pop2PianoForConditionalGeneration, Pop2PianoProcessor >>> model = Pop2PianoForConditionalGeneration.from_pretrained("sweetcocoa/pop2piano") >>> processor = Pop2PianoProcessor.from_pretrained("sweetcocoa/pop2piano") >>> ds = load_dataset("sweetcocoa/pop2piano_ci", split="test") >>> inputs = processor( ... audio=ds["audio"][0]["array"], sampling_rate=ds["audio"][0]["sampling_rate"], return_tensors="pt" ... ) >>> model_output = model.generate(input_features=inputs["input_features"], composer="composer1") >>> tokenizer_output = processor.batch_decode( ... token_ids=model_output, feature_extractor_output=inputs ... )["pretty_midi_objects"][0] >>> tokenizer_output.write("./Outputs/midi_output.mid") ``` - Example using your own audio file: ```python >>> import librosa >>> from transformers import Pop2PianoForConditionalGeneration, Pop2PianoProcessor >>> audio, sr = librosa.load("<your_audio_file_here>", sr=44100) # feel free to change the sr to a suitable value. >>> model = Pop2PianoForConditionalGeneration.from_pretrained("sweetcocoa/pop2piano") >>> processor = Pop2PianoProcessor.from_pretrained("sweetcocoa/pop2piano") >>> inputs = processor(audio=audio, sampling_rate=sr, return_tensors="pt") >>> model_output = model.generate(input_features=inputs["input_features"], composer="composer1") >>> tokenizer_output = processor.batch_decode( ... token_ids=model_output, feature_extractor_output=inputs ... )["pretty_midi_objects"][0] >>> tokenizer_output.write("./Outputs/midi_output.mid") ``` - Example of processing multiple audio files in batch: ```python >>> import librosa >>> from transformers import Pop2PianoForConditionalGeneration, Pop2PianoProcessor >>> # feel free to change the sr to a suitable value. >>> audio1, sr1 = librosa.load("<your_first_audio_file_here>", sr=44100) >>> audio2, sr2 = librosa.load("<your_second_audio_file_here>", sr=44100) >>> model = Pop2PianoForConditionalGeneration.from_pretrained("sweetcocoa/pop2piano") >>> processor = Pop2PianoProcessor.from_pretrained("sweetcocoa/pop2piano") >>> inputs = processor(audio=[audio1, audio2], sampling_rate=[sr1, sr2], return_attention_mask=True, return_tensors="pt") >>> # Since we now generating in batch(2 audios) we must pass the attention_mask >>> model_output = model.generate( ... input_features=inputs["input_features"], ... attention_mask=inputs["attention_mask"], ... composer="composer1", ... ) >>> tokenizer_output = processor.batch_decode( ... token_ids=model_output, feature_extractor_output=inputs ... )["pretty_midi_objects"] >>> # Since we now have 2 generated MIDI files >>> tokenizer_output[0].write("./Outputs/midi_output1.mid") >>> tokenizer_output[1].write("./Outputs/midi_output2.mid") ``` - Example of processing multiple audio files in batch (Using `Pop2PianoFeatureExtractor` and `Pop2PianoTokenizer`): ```python >>> import librosa >>> from transformers import Pop2PianoForConditionalGeneration, Pop2PianoFeatureExtractor, Pop2PianoTokenizer >>> # feel free to change the sr to a suitable value. 
>>> audio1, sr1 = librosa.load("<your_first_audio_file_here>", sr=44100) >>> audio2, sr2 = librosa.load("<your_second_audio_file_here>", sr=44100) >>> model = Pop2PianoForConditionalGeneration.from_pretrained("sweetcocoa/pop2piano") >>> feature_extractor = Pop2PianoFeatureExtractor.from_pretrained("sweetcocoa/pop2piano") >>> tokenizer = Pop2PianoTokenizer.from_pretrained("sweetcocoa/pop2piano") >>> inputs = feature_extractor( ... audio=[audio1, audio2], ... sampling_rate=[sr1, sr2], ... return_attention_mask=True, ... return_tensors="pt", ... ) >>> # Since we now generating in batch(2 audios) we must pass the attention_mask >>> model_output = model.generate( ... input_features=inputs["input_features"], ... attention_mask=inputs["attention_mask"], ... composer="composer1", ... ) >>> tokenizer_output = tokenizer.batch_decode( ... token_ids=model_output, feature_extractor_output=inputs ... )["pretty_midi_objects"] >>> # Since we now have 2 generated MIDI files >>> tokenizer_output[0].write("./Outputs/midi_output1.mid") >>> tokenizer_output[1].write("./Outputs/midi_output2.mid") ``` ## Pop2PianoConfig [[autodoc]] Pop2PianoConfig ## Pop2PianoFeatureExtractor [[autodoc]] Pop2PianoFeatureExtractor - __call__ ## Pop2PianoForConditionalGeneration [[autodoc]] Pop2PianoForConditionalGeneration - forward - generate ## Pop2PianoTokenizer [[autodoc]] Pop2PianoTokenizer - __call__ ## Pop2PianoProcessor [[autodoc]] Pop2PianoProcessor - __call__
transformers/docs/source/en/model_doc/pop2piano.md/0
{ "file_path": "transformers/docs/source/en/model_doc/pop2piano.md", "repo_id": "transformers", "token_count": 2624 }
263
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # RoFormer ## Overview The RoFormer model was proposed in [RoFormer: Enhanced Transformer with Rotary Position Embedding](https://arxiv.org/pdf/2104.09864v1.pdf) by Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu. The abstract from the paper is the following: *Position encoding in transformer architecture provides supervision for dependency modeling between elements at different positions in the sequence. We investigate various methods to encode positional information in transformer-based language models and propose a novel implementation named Rotary Position Embedding(RoPE). The proposed RoPE encodes absolute positional information with rotation matrix and naturally incorporates explicit relative position dependency in self-attention formulation. Notably, RoPE comes with valuable properties such as flexibility of being expand to any sequence lengths, decaying inter-token dependency with increasing relative distances, and capability of equipping the linear self-attention with relative position encoding. As a result, the enhanced transformer with rotary position embedding, or RoFormer, achieves superior performance in tasks with long texts. We release the theoretical analysis along with some preliminary experiment results on Chinese data. The undergoing experiment for English benchmark will soon be updated.* This model was contributed by [junnyu](https://huggingface.co/junnyu). The original code can be found [here](https://github.com/ZhuiyiTechnology/roformer). ## Usage tips RoFormer is a BERT-like autoencoding model with rotary position embeddings. Rotary position embeddings have shown improved performance on classification tasks with long texts. 
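Because RoFormer is trained with a BERT-style masked language modeling objective, a quick way to try it is to predict a masked token. The snippet below is a minimal sketch, assuming the `junnyu/roformer_chinese_base` checkpoint and that the `rjieba` package is installed for its Chinese tokenizer.

```python
>>> import torch
>>> from transformers import AutoTokenizer, RoFormerForMaskedLM

>>> tokenizer = AutoTokenizer.from_pretrained("junnyu/roformer_chinese_base")
>>> model = RoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")

>>> inputs = tokenizer("今天天气非常[MASK]。", return_tensors="pt")
>>> with torch.no_grad():
...     logits = model(**inputs).logits

>>> # most likely token at the masked position
>>> mask_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0]
>>> print(tokenizer.decode(logits[0, mask_index].argmax(dim=-1)))
```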
## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) - [Question answering task guide](../tasks/question_answering) - [Causal language modeling task guide](../tasks/language_modeling) - [Masked language modeling task guide](../tasks/masked_language_modeling) - [Multiple choice task guide](../tasks/multiple_choice) ## RoFormerConfig [[autodoc]] RoFormerConfig ## RoFormerTokenizer [[autodoc]] RoFormerTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## RoFormerTokenizerFast [[autodoc]] RoFormerTokenizerFast - build_inputs_with_special_tokens <frameworkcontent> <pt> ## RoFormerModel [[autodoc]] RoFormerModel - forward ## RoFormerForCausalLM [[autodoc]] RoFormerForCausalLM - forward ## RoFormerForMaskedLM [[autodoc]] RoFormerForMaskedLM - forward ## RoFormerForSequenceClassification [[autodoc]] RoFormerForSequenceClassification - forward ## RoFormerForMultipleChoice [[autodoc]] RoFormerForMultipleChoice - forward ## RoFormerForTokenClassification [[autodoc]] RoFormerForTokenClassification - forward ## RoFormerForQuestionAnswering [[autodoc]] RoFormerForQuestionAnswering - forward </pt> <tf> ## TFRoFormerModel [[autodoc]] TFRoFormerModel - call ## TFRoFormerForMaskedLM [[autodoc]] TFRoFormerForMaskedLM - call ## TFRoFormerForCausalLM [[autodoc]] TFRoFormerForCausalLM - call ## TFRoFormerForSequenceClassification [[autodoc]] TFRoFormerForSequenceClassification - call ## TFRoFormerForMultipleChoice [[autodoc]] TFRoFormerForMultipleChoice - call ## TFRoFormerForTokenClassification [[autodoc]] TFRoFormerForTokenClassification - call ## TFRoFormerForQuestionAnswering [[autodoc]] TFRoFormerForQuestionAnswering - call </tf> <jax> ## FlaxRoFormerModel [[autodoc]] FlaxRoFormerModel - __call__ ## FlaxRoFormerForMaskedLM [[autodoc]] FlaxRoFormerForMaskedLM - __call__ ## FlaxRoFormerForSequenceClassification [[autodoc]] FlaxRoFormerForSequenceClassification - __call__ ## FlaxRoFormerForMultipleChoice [[autodoc]] FlaxRoFormerForMultipleChoice - __call__ ## FlaxRoFormerForTokenClassification [[autodoc]] FlaxRoFormerForTokenClassification - __call__ ## FlaxRoFormerForQuestionAnswering [[autodoc]] FlaxRoFormerForQuestionAnswering - __call__ </jax> </frameworkcontent>
transformers/docs/source/en/model_doc/roformer.md/0
{ "file_path": "transformers/docs/source/en/model_doc/roformer.md", "repo_id": "transformers", "token_count": 1450 }
264
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # StableLM ## Overview `StableLM 3B 4E1T` was proposed in [`StableLM 3B 4E1T`: Technical Report](https://stability.wandb.io/stability-llm/stable-lm/reports/StableLM-3B-4E1T--VmlldzoyMjU4?accessToken=u3zujipenkx5g7rtcj9qojjgxpconyjktjkli2po09nffrffdhhchq045vp0wyfo) by Stability AI and is the first model in a series of multi-epoch pre-trained language models. ### Model Details `StableLM 3B 4E1T` is a decoder-only base language model pre-trained on 1 trillion tokens of diverse English and code datasets for four epochs. The model architecture is transformer-based with partial Rotary Position Embeddings, SwiGLU activation, LayerNorm, etc. We also provide `StableLM Zephyr 3B`, an instruction fine-tuned version of the model that can be used for chat-based applications. ### Usage Tips - The architecture is similar to LLaMA but with RoPE applied to 25% of head embedding dimensions, LayerNorm instead of RMSNorm, and optional QKV bias terms. - `StableLM 3B 4E1T`-based models uses the same tokenizer as [`GPTNeoXTokenizerFast`]. `StableLM 3B 4E1T` and `StableLM Zephyr 3B` can be found on the [Huggingface Hub](https://huggingface.co/stabilityai) The following code snippet demonstrates how to use `StableLM 3B 4E1T` for inference: ```python >>> from transformers import AutoModelForCausalLM, AutoTokenizer >>> device = "cuda" # the device to load the model onto >>> tokenizer = AutoTokenizer.from_pretrained("stabilityai/stablelm-3b-4e1t") >>> model = AutoModelForCausalLM.from_pretrained("stabilityai/stablelm-3b-4e1t") >>> model.to(device) >>> model_inputs = tokenizer("The weather is always wonderful in", return_tensors="pt").to(model.device) >>> generated_ids = model.generate(**model_inputs, max_length=32, do_sample=True) >>> responses = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) >>> responses ['The weather is always wonderful in Santa Barbara and, for visitors hoping to make the move to our beautiful seaside city, this town offers plenty of great places to...'] ``` ## Combining StableLM and Flash Attention 2 First, make sure to install the latest version of Flash Attention v2. ```bash pip install -U flash-attn --no-build-isolation ``` Also make sure that your hardware is compatible with Flash-Attention 2. Read more about it in the official documentation of the [`flash-attn`](https://github.com/Dao-AILab/flash-attention) repository. Note: you must load your model in half-precision (e.g. `torch.bfloat16`). 
Now, to run the model with Flash Attention 2, refer to the snippet below: ```python >>> import torch >>> from transformers import AutoModelForCausalLM, AutoTokenizer >>> device = "cuda" # the device to load the model onto >>> tokenizer = AutoTokenizer.from_pretrained("stabilityai/stablelm-3b-4e1t") >>> model = AutoModelForCausalLM.from_pretrained("stabilityai/stablelm-3b-4e1t", torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2") >>> model.to(device) >>> model_inputs = tokenizer("The weather is always wonderful in", return_tensors="pt").to(model.device) >>> generated_ids = model.generate(**model_inputs, max_length=32, do_sample=True) >>> responses = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) >>> responses ['The weather is always wonderful in Santa Barbara and, for visitors hoping to make the move to our beautiful seaside city, this town offers plenty of great places to...'] ``` ## StableLmConfig [[autodoc]] StableLmConfig ## StableLmModel [[autodoc]] StableLmModel - forward ## StableLmForCausalLM [[autodoc]] StableLmForCausalLM - forward ## StableLmForSequenceClassification [[autodoc]] StableLmForSequenceClassification - forward
transformers/docs/source/en/model_doc/stablelm.md/0
{ "file_path": "transformers/docs/source/en/model_doc/stablelm.md", "repo_id": "transformers", "token_count": 1373 }
265
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Transformer XL <Tip warning={true}> This model is in maintenance mode only, so we won't accept any new PRs changing its code. This model was deprecated due to security issues linked to `pickle.load`. We recommend switching to more recent models for improved security. In case you would still like to use `TransfoXL` in your experiments, we recommend using the [Hub checkpoint](https://huggingface.co/transfo-xl/transfo-xl-wt103) with a specific revision to ensure you are downloading safe files from the Hub. You will need to set the environment variable `TRUST_REMOTE_CODE` to `True` in order to allow the usage of `pickle.load()`: ```python import os from transformers import TransfoXLTokenizer, TransfoXLLMHeadModel os.environ["TRUST_REMOTE_CODE"] = "True" checkpoint = 'transfo-xl/transfo-xl-wt103' revision = '40a186da79458c9f9de846edfaea79c412137f97' tokenizer = TransfoXLTokenizer.from_pretrained(checkpoint, revision=revision) model = TransfoXLLMHeadModel.from_pretrained(checkpoint, revision=revision) ``` If you run into any issues running this model, please reinstall the last version that supported this model: v4.35.0. You can do so by running the following command: `pip install -U transformers==4.35.0`. </Tip> <div class="flex flex-wrap space-x-1"> <a href="https://huggingface.co/models?filter=transfo-xl"> <img alt="Models" src="https://img.shields.io/badge/All_model_pages-transfo--xl-blueviolet"> </a> <a href="https://huggingface.co/spaces/docs-demos/transfo-xl-wt103"> <img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"> </a> </div> ## Overview The Transformer-XL model was proposed in [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai, Zhilin Yang, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov. It's a causal (uni-directional) transformer with relative positioning (sinusoïdal) embeddings which can reuse previously computed hidden-states to attend to longer context (memory). This model also uses adaptive softmax inputs and outputs (tied). The abstract from the paper is the following: *Transformers have a potential of learning longer-term dependency, but are limited by a fixed-length context in the setting of language modeling. We propose a novel neural architecture Transformer-XL that enables learning dependency beyond a fixed length without disrupting temporal coherence. It consists of a segment-level recurrence mechanism and a novel positional encoding scheme. Our method not only enables capturing longer-term dependency, but also resolves the context fragmentation problem. 
As a result, Transformer-XL learns dependency that is 80% longer than RNNs and 450% longer than vanilla Transformers, achieves better performance on both short and long sequences, and is up to 1,800+ times faster than vanilla Transformers during evaluation. Notably, we improve the state-of-the-art results of bpc/perplexity to 0.99 on enwiki8, 1.08 on text8, 18.3 on WikiText-103, 21.8 on One Billion Word, and 54.5 on Penn Treebank (without finetuning). When trained only on WikiText-103, Transformer-XL manages to generate reasonably coherent, novel text articles with thousands of tokens.* This model was contributed by [thomwolf](https://huggingface.co/thomwolf). The original code can be found [here](https://github.com/kimiyoung/transformer-xl). ## Usage tips - Transformer-XL uses relative sinusoidal positional embeddings. Padding can be done on the left or on the right. The original implementation trains on SQuAD with padding on the left, therefore the padding defaults are set to left. - Transformer-XL is one of the few models that has no sequence length limit. - Same as a regular GPT model, but introduces a recurrence mechanism for two consecutive segments (similar to a regular RNNs with two consecutive inputs). In this context, a segment is a number of consecutive tokens (for instance 512) that may span across multiple documents, and segments are fed in order to the model. - Basically, the hidden states of the previous segment are concatenated to the current input to compute the attention scores. This allows the model to pay attention to information that was in the previous segment as well as the current one. By stacking multiple attention layers, the receptive field can be increased to multiple previous segments. - This changes the positional embeddings to positional relative embeddings (as the regular positional embeddings would give the same results in the current input and the current hidden state at a given position) and needs to make some adjustments in the way attention scores are computed. <Tip warning={true}> TransformerXL does **not** work with *torch.nn.DataParallel* due to a bug in PyTorch, see [issue #36035](https://github.com/pytorch/pytorch/issues/36035) </Tip> ## Resources - [Text classification task guide](../tasks/sequence_classification) - [Causal language modeling task guide](../tasks/language_modeling) ## TransfoXLConfig [[autodoc]] TransfoXLConfig ## TransfoXLTokenizer [[autodoc]] TransfoXLTokenizer - save_vocabulary ## TransfoXL specific outputs [[autodoc]] models.deprecated.transfo_xl.modeling_transfo_xl.TransfoXLModelOutput [[autodoc]] models.deprecated.transfo_xl.modeling_transfo_xl.TransfoXLLMHeadModelOutput [[autodoc]] models.deprecated.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLModelOutput [[autodoc]] models.deprecated.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLLMHeadModelOutput <frameworkcontent> <pt> ## TransfoXLModel [[autodoc]] TransfoXLModel - forward ## TransfoXLLMHeadModel [[autodoc]] TransfoXLLMHeadModel - forward ## TransfoXLForSequenceClassification [[autodoc]] TransfoXLForSequenceClassification - forward </pt> <tf> ## TFTransfoXLModel [[autodoc]] TFTransfoXLModel - call ## TFTransfoXLLMHeadModel [[autodoc]] TFTransfoXLLMHeadModel - call ## TFTransfoXLForSequenceClassification [[autodoc]] TFTransfoXLForSequenceClassification - call </tf> </frameworkcontent> ## Internal Layers [[autodoc]] AdaptiveEmbedding [[autodoc]] TFAdaptiveEmbedding
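To make the recurrence mechanism described in the usage tips concrete, here is a minimal sketch (assuming `model` was loaded as in the snippet at the top of this page, with the pinned revision and `TRUST_REMOTE_CODE` set). The hidden states cached from one segment are passed back in as `mems` when the next segment is processed:

```py
>>> import torch

>>> # Two consecutive segments of (dummy) token ids with batch size 1
>>> segment_1 = torch.tensor([[10, 20, 30, 40]])
>>> segment_2 = torch.tensor([[50, 60, 70, 80]])

>>> # First segment: no memory is available yet
>>> outputs_1 = model(segment_1)
>>> mems = outputs_1.mems  # per-layer hidden states kept as memory

>>> # Second segment: attention can now look back into the cached memory
>>> outputs_2 = model(segment_2, mems=mems)
```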
transformers/docs/source/en/model_doc/transfo-xl.md/0
{ "file_path": "transformers/docs/source/en/model_doc/transfo-xl.md", "repo_id": "transformers", "token_count": 2000 }
266
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. -->

# VisionTextDualEncoder

## Overview

The [`VisionTextDualEncoderModel`] can be used to initialize a vision-text dual encoder model with any pretrained vision autoencoding model as the vision encoder (*e.g.* [ViT](vit), [BEiT](beit), [DeiT](deit)) and any pretrained text autoencoding model as the text encoder (*e.g.* [RoBERTa](roberta), [BERT](bert)). Two projection layers are added on top of both the vision and text encoder to project the output embeddings to a shared latent space. The projection layers are randomly initialized, so the model should be fine-tuned on a downstream task. This model can be used to align the vision-text embeddings using CLIP-like contrastive image-text training and then can be used for zero-shot vision tasks such as image classification or retrieval.

In [LiT: Zero-Shot Transfer with Locked-image Text Tuning](https://arxiv.org/abs/2111.07991) it is shown how leveraging pre-trained (locked/frozen) image and text models for contrastive learning yields significant improvements on new zero-shot vision tasks such as image classification or retrieval.

## VisionTextDualEncoderConfig

[[autodoc]] VisionTextDualEncoderConfig

## VisionTextDualEncoderProcessor

[[autodoc]] VisionTextDualEncoderProcessor

<frameworkcontent>
<pt>

## VisionTextDualEncoderModel

[[autodoc]] VisionTextDualEncoderModel
    - forward

</pt>
<tf>

## TFVisionTextDualEncoderModel

[[autodoc]] TFVisionTextDualEncoderModel
    - call

</tf>
<jax>

## FlaxVisionTextDualEncoderModel

[[autodoc]] FlaxVisionTextDualEncoderModel
    - __call__

</jax>
</frameworkcontent>
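The snippet below is a minimal sketch of the idea described above: it pairs a ViT vision encoder with a BERT text encoder and scores image-text similarity. The checkpoint names and the image URL are only illustrative, and because the projection layers start out randomly initialized, the similarity scores are not meaningful until the model has been fine-tuned contrastively:

```py
>>> from PIL import Image
>>> import requests
>>> from transformers import (
...     AutoImageProcessor,
...     AutoTokenizer,
...     VisionTextDualEncoderModel,
...     VisionTextDualEncoderProcessor,
... )

>>> # Build a dual encoder from two unimodal checkpoints; projection layers are added and randomly initialized
>>> model = VisionTextDualEncoderModel.from_vision_text_pretrained(
...     "google/vit-base-patch16-224", "google-bert/bert-base-uncased"
... )
>>> processor = VisionTextDualEncoderProcessor(
...     AutoImageProcessor.from_pretrained("google/vit-base-patch16-224"),
...     AutoTokenizer.from_pretrained("google-bert/bert-base-uncased"),
... )

>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(
...     text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
... )

>>> outputs = model(**inputs)
>>> outputs.logits_per_image.shape  # (number of images, number of texts)
torch.Size([1, 2])
```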
transformers/docs/source/en/model_doc/vision-text-dual-encoder.md/0
{ "file_path": "transformers/docs/source/en/model_doc/vision-text-dual-encoder.md", "repo_id": "transformers", "token_count": 652 }
267
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Multilingual models for inference [[open-in-colab]] There are several multilingual models in 🤗 Transformers, and their inference usage differs from monolingual models. Not *all* multilingual model usage is different though. Some models, like [google-bert/bert-base-multilingual-uncased](https://huggingface.co/google-bert/bert-base-multilingual-uncased), can be used just like a monolingual model. This guide will show you how to use multilingual models whose usage differs for inference. ## XLM XLM has ten different checkpoints, only one of which is monolingual. The nine remaining model checkpoints can be split into two categories: the checkpoints that use language embeddings and those that don't. ### XLM with language embeddings The following XLM models use language embeddings to specify the language used at inference: - `FacebookAI/xlm-mlm-ende-1024` (Masked language modeling, English-German) - `FacebookAI/xlm-mlm-enfr-1024` (Masked language modeling, English-French) - `FacebookAI/xlm-mlm-enro-1024` (Masked language modeling, English-Romanian) - `FacebookAI/xlm-mlm-xnli15-1024` (Masked language modeling, XNLI languages) - `FacebookAI/xlm-mlm-tlm-xnli15-1024` (Masked language modeling + translation, XNLI languages) - `FacebookAI/xlm-clm-enfr-1024` (Causal language modeling, English-French) - `FacebookAI/xlm-clm-ende-1024` (Causal language modeling, English-German) Language embeddings are represented as a tensor of the same shape as the `input_ids` passed to the model. The values in these tensors depend on the language used and are identified by the tokenizer's `lang2id` and `id2lang` attributes. In this example, load the `FacebookAI/xlm-clm-enfr-1024` checkpoint (Causal language modeling, English-French): ```py >>> import torch >>> from transformers import XLMTokenizer, XLMWithLMHeadModel >>> tokenizer = XLMTokenizer.from_pretrained("FacebookAI/xlm-clm-enfr-1024") >>> model = XLMWithLMHeadModel.from_pretrained("FacebookAI/xlm-clm-enfr-1024") ``` The `lang2id` attribute of the tokenizer displays this model's languages and their ids: ```py >>> print(tokenizer.lang2id) {'en': 0, 'fr': 1} ``` Next, create an example input: ```py >>> input_ids = torch.tensor([tokenizer.encode("Wikipedia was used to")]) # batch size of 1 ``` Set the language id as `"en"` and use it to define the language embedding. The language embedding is a tensor filled with `0` since that is the language id for English. This tensor should be the same size as `input_ids`. 
```py >>> language_id = tokenizer.lang2id["en"] # 0 >>> langs = torch.tensor([language_id] * input_ids.shape[1]) # torch.tensor([0, 0, 0, ..., 0]) >>> # We reshape it to be of size (batch_size, sequence_length) >>> langs = langs.view(1, -1) # is now of shape [1, sequence_length] (we have a batch size of 1) ``` Now you can pass the `input_ids` and language embedding to the model: ```py >>> outputs = model(input_ids, langs=langs) ``` The [run_generation.py](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-generation/run_generation.py) script can generate text with language embeddings using the `xlm-clm` checkpoints. ### XLM without language embeddings The following XLM models do not require language embeddings during inference: - `FacebookAI/xlm-mlm-17-1280` (Masked language modeling, 17 languages) - `FacebookAI/xlm-mlm-100-1280` (Masked language modeling, 100 languages) These models are used for generic sentence representations, unlike the previous XLM checkpoints. ## BERT The following BERT models can be used for multilingual tasks: - `google-bert/bert-base-multilingual-uncased` (Masked language modeling + Next sentence prediction, 102 languages) - `google-bert/bert-base-multilingual-cased` (Masked language modeling + Next sentence prediction, 104 languages) These models do not require language embeddings during inference. They should identify the language from the context and infer accordingly. ## XLM-RoBERTa The following XLM-RoBERTa models can be used for multilingual tasks: - `FacebookAI/xlm-roberta-base` (Masked language modeling, 100 languages) - `FacebookAI/xlm-roberta-large` (Masked language modeling, 100 languages) XLM-RoBERTa was trained on 2.5TB of newly created and cleaned CommonCrawl data in 100 languages. It provides strong gains over previously released multilingual models like mBERT or XLM on downstream tasks like classification, sequence labeling, and question answering. ## M2M100 The following M2M100 models can be used for multilingual translation: - `facebook/m2m100_418M` (Translation) - `facebook/m2m100_1.2B` (Translation) In this example, load the `facebook/m2m100_418M` checkpoint to translate from Chinese to English. You can set the source language in the tokenizer: ```py >>> from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer >>> en_text = "Do not meddle in the affairs of wizards, for they are subtle and quick to anger." >>> chinese_text = "不要插手巫師的事務, 因為他們是微妙的, 很快就會發怒." >>> tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="zh") >>> model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M") ``` Tokenize the text: ```py >>> encoded_zh = tokenizer(chinese_text, return_tensors="pt") ``` M2M100 forces the target language id as the first generated token to translate to the target language. Set the `forced_bos_token_id` to `en` in the `generate` method to translate to English: ```py >>> generated_tokens = model.generate(**encoded_zh, forced_bos_token_id=tokenizer.get_lang_id("en")) >>> tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) 'Do not interfere with the matters of the witches, because they are delicate and will soon be angry.' 
```

## MBart

The following MBart models can be used for multilingual translation:

- `facebook/mbart-large-50-one-to-many-mmt` (One-to-many multilingual machine translation, 50 languages)
- `facebook/mbart-large-50-many-to-many-mmt` (Many-to-many multilingual machine translation, 50 languages)
- `facebook/mbart-large-50-many-to-one-mmt` (Many-to-one multilingual machine translation, 50 languages)
- `facebook/mbart-large-50` (Multilingual translation, 50 languages)
- `facebook/mbart-large-cc25`

In this example, load the `facebook/mbart-large-50-many-to-many-mmt` checkpoint to translate Finnish to English. You can set the source language in the tokenizer:

```py
>>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

>>> en_text = "Do not meddle in the affairs of wizards, for they are subtle and quick to anger."
>>> fi_text = "Älä sekaannu velhojen asioihin, sillä ne ovat hienovaraisia ja nopeasti vihaisia."

>>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-50-many-to-many-mmt", src_lang="fi_FI")
>>> model = AutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")
```

Tokenize the Finnish text:

```py
>>> encoded_fi = tokenizer(fi_text, return_tensors="pt")
```

MBart forces the target language id as the first generated token to translate to the target language. Set the `forced_bos_token_id` to `en` in the `generate` method to translate to English:

```py
>>> generated_tokens = model.generate(**encoded_fi, forced_bos_token_id=tokenizer.lang_code_to_id["en_XX"])
>>> tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
"Don't interfere with the wizard's affairs, because they are subtle, will soon get angry."
```

If you are using the `facebook/mbart-large-50-many-to-one-mmt` checkpoint, you don't need to force the target language id as the first generated token; otherwise, the usage is the same.
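For completeness, here is a minimal sketch of that many-to-one case, reusing `fi_text` and the imports from above. It assumes the `facebook/mbart-large-50-many-to-one-mmt` checkpoint always targets English, so no `forced_bos_token_id` is needed:

```py
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-50-many-to-one-mmt", src_lang="fi_FI")
>>> model = AutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-50-many-to-one-mmt")

>>> encoded_fi = tokenizer(fi_text, return_tensors="pt")
>>> generated_tokens = model.generate(**encoded_fi)  # no forced_bos_token_id required
>>> translation = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
```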
transformers/docs/source/en/multilingual.md/0
{ "file_path": "transformers/docs/source/en/multilingual.md", "repo_id": "transformers", "token_count": 2588 }
268
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Philosophy 🤗 Transformers is an opinionated library built for: - machine learning researchers and educators seeking to use, study or extend large-scale Transformers models. - hands-on practitioners who want to fine-tune those models or serve them in production, or both. - engineers who just want to download a pretrained model and use it to solve a given machine learning task. The library was designed with two strong goals in mind: 1. Be as easy and fast to use as possible: - We strongly limited the number of user-facing abstractions to learn, in fact, there are almost no abstractions, just three standard classes required to use each model: [configuration](main_classes/configuration), [models](main_classes/model), and a preprocessing class ([tokenizer](main_classes/tokenizer) for NLP, [image processor](main_classes/image_processor) for vision, [feature extractor](main_classes/feature_extractor) for audio, and [processor](main_classes/processors) for multimodal inputs). - All of these classes can be initialized in a simple and unified way from pretrained instances by using a common `from_pretrained()` method which downloads (if needed), caches and loads the related class instance and associated data (configurations' hyperparameters, tokenizers' vocabulary, and models' weights) from a pretrained checkpoint provided on [Hugging Face Hub](https://huggingface.co/models) or your own saved checkpoint. - On top of those three base classes, the library provides two APIs: [`pipeline`] for quickly using a model for inference on a given task and [`Trainer`] to quickly train or fine-tune a PyTorch model (all TensorFlow models are compatible with `Keras.fit`). - As a consequence, this library is NOT a modular toolbox of building blocks for neural nets. If you want to extend or build upon the library, just use regular Python, PyTorch, TensorFlow, Keras modules and inherit from the base classes of the library to reuse functionalities like model loading and saving. If you'd like to learn more about our coding philosophy for models, check out our [Repeat Yourself](https://huggingface.co/blog/transformers-design-philosophy) blog post. 2. Provide state-of-the-art models with performances as close as possible to the original models: - We provide at least one example for each architecture which reproduces a result provided by the official authors of said architecture. - The code is usually as close to the original code base as possible which means some PyTorch code may be not as *pytorchic* as it could be as a result of being converted TensorFlow code and vice versa. A few other goals: - Expose the models' internals as consistently as possible: - We give access, using a single API, to the full hidden-states and attention weights. 
- The preprocessing classes and base model APIs are standardized to easily switch between models. - Incorporate a subjective selection of promising tools for fine-tuning and investigating these models: - A simple and consistent way to add new tokens to the vocabulary and embeddings for fine-tuning. - Simple ways to mask and prune Transformer heads. - Easily switch between PyTorch, TensorFlow 2.0 and Flax, allowing training with one framework and inference with another. ## Main concepts The library is built around three types of classes for each model: - **Model classes** can be PyTorch models ([torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module)), Keras models ([tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model)) or JAX/Flax models ([flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html)) that work with the pretrained weights provided in the library. - **Configuration classes** store the hyperparameters required to build a model (such as the number of layers and hidden size). You don't always need to instantiate these yourself. In particular, if you are using a pretrained model without any modification, creating the model will automatically take care of instantiating the configuration (which is part of the model). - **Preprocessing classes** convert the raw data into a format accepted by the model. A [tokenizer](main_classes/tokenizer) stores the vocabulary for each model and provide methods for encoding and decoding strings in a list of token embedding indices to be fed to a model. [Image processors](main_classes/image_processor) preprocess vision inputs, [feature extractors](main_classes/feature_extractor) preprocess audio inputs, and a [processor](main_classes/processors) handles multimodal inputs. All these classes can be instantiated from pretrained instances, saved locally, and shared on the Hub with three methods: - `from_pretrained()` lets you instantiate a model, configuration, and preprocessing class from a pretrained version either provided by the library itself (the supported models can be found on the [Model Hub](https://huggingface.co/models)) or stored locally (or on a server) by the user. - `save_pretrained()` lets you save a model, configuration, and preprocessing class locally so that it can be reloaded using `from_pretrained()`. - `push_to_hub()` lets you share a model, configuration, and a preprocessing class to the Hub, so it is easily accessible to everyone.
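To make those three methods concrete, here is a small sketch using a common checkpoint; any model, configuration, or preprocessing class follows the same pattern, and the repository name passed to `push_to_hub()` is just a placeholder (sharing also requires being authenticated with the Hub):

```python
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Instantiate a pretrained model and its preprocessing class
model = AutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased")
tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased")

# Save both locally so they can later be reloaded with from_pretrained()
model.save_pretrained("./my-local-checkpoint")
tokenizer.save_pretrained("./my-local-checkpoint")

# Share them on the Hub (requires `huggingface-cli login` or an access token)
# model.push_to_hub("my-username/my-finetuned-model")
# tokenizer.push_to_hub("my-username/my-finetuned-model")
```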
transformers/docs/source/en/philosophy.md/0
{ "file_path": "transformers/docs/source/en/philosophy.md", "repo_id": "transformers", "token_count": 1518 }
269
# docstyle-ignore INSTALL_CONTENT = """ # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git """ notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}] black_avoid_patterns = { "{processor_class}": "FakeProcessorClass", "{model_class}": "FakeModelClass", "{object_class}": "FakeObjectClass", }
transformers/docs/source/es/_config.py/0
{ "file_path": "transformers/docs/source/es/_config.py", "repo_id": "transformers", "token_count": 155 }
270
<!--- Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Instalación En esta guía puedes encontrar información para instalar 🤗 Transformers para cualquier biblioteca de Machine Learning con la que estés trabajando. Además, encontrarás información sobre cómo establecer el caché y cómo configurar 🤗 Transformers para correrlo de manera offline (opcional). 🤗 Transformers ha sido probada en Python 3.6+, PyTorch 1.1.0+, TensorFlow 2.0+, y Flax. Para instalar la biblioteca de deep learning con la que desees trabajar, sigue las instrucciones correspondientes listadas a continuación: * [PyTorch](https://pytorch.org/get-started/locally/) * [TensorFlow 2.0](https://www.tensorflow.org/install/pip) * [Flax](https://flax.readthedocs.io/en/latest/) ## Instalación con pip Es necesario instalar 🤗 Transformers en un [entorno virtual](https://docs.python.org/3/library/venv.html). Si necesitas más información sobre entornos virtuales de Python, consulta esta [guía](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/ ). Un entorno virtual facilita el manejo de proyectos y evita problemas de compatibilidad entre dependencias. Comienza por crear un entorno virtual en el directorio de tu proyecto: ```bash python -m venv .env ``` Activa el entorno virtual: ```bash source .env/bin/activate ``` Ahora puedes instalar 🤗 Transformers con el siguiente comando: ```bash pip install transformers ``` Solo para CPU, puedes instalar 🤗 Transformers y una biblioteca de deep learning con un comando de una sola línea. Por ejemplo, instala 🤗 Transformers y Pytorch: ```bash pip install transformers[torch] ``` 🤗 Transformers y TensorFlow 2.0: ```bash pip install transformers[tf-cpu] ``` 🤗 Transformers y Flax: ```bash pip install transformers[flax] ``` Por último, revisa si 🤗 Transformers ha sido instalada exitosamente con el siguiente comando que descarga un modelo pre-entrenado: ```bash python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('we love you'))" ``` Después imprime la etiqueta y el puntaje: ```bash [{'label': 'POSITIVE', 'score': 0.9998704791069031}] ``` ## Instalación desde la fuente Instala 🤗 Transformers desde la fuente con el siguiente comando: ```bash pip install git+https://github.com/huggingface/transformers ``` El comando de arriba instala la versión `master` más actual en vez de la última versión estable. La versión `master` es útil para obtener los últimos avances de 🤗 Transformers. Por ejemplo, se puede dar el caso de que un error fue corregido después de la última versión estable pero aún no se ha liberado un nuevo lanzamiento. Sin embargo, existe la posibilidad de que la versión `master` no sea estable. El equipo trata de mantener la versión `master` operacional y la mayoría de los errores son resueltos en unas cuantas horas o un día. 
Si encuentras algún problema, por favor abre un [Issue](https://github.com/huggingface/transformers/issues) para que pueda ser corregido más rápido. Verifica si 🤗 Transformers está instalada apropiadamente con el siguiente comando: ```bash python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('I love you'))" ``` ## Instalación editable Necesitarás una instalación editable si deseas: * Usar la versión `master` del código fuente. * Contribuir a 🤗 Transformers y necesitas probar cambios en el código. Clona el repositorio e instala 🤗 Transformers con los siguientes comandos: ```bash git clone https://github.com/huggingface/transformers.git cd transformers pip install -e . ``` Éstos comandos van a ligar el directorio desde donde clonamos el repositorio al path de las bibliotecas de Python. Python ahora buscará dentro de la carpeta que clonaste además de los paths normales de la biblioteca. Por ejemplo, si los paquetes de Python se encuentran instalados en `~/anaconda3/envs/main/lib/python3.7/site-packages/`, Python también buscará en el directorio desde donde clonamos el repositorio `~/transformers/`. <Tip warning={true}> Debes mantener el directorio `transformers` si deseas seguir usando la biblioteca. </Tip> Puedes actualizar tu copia local a la última versión de 🤗 Transformers con el siguiente comando: ```bash cd ~/transformers/ git pull ``` El entorno de Python que creaste para la instalación de 🤗 Transformers encontrará la versión `master` en la siguiente ejecución. ## Instalación con conda Puedes instalar 🤗 Transformers desde el canal de conda `conda-forge` con el siguiente comando: ```bash conda install conda-forge::transformers ``` ## Configuración de Caché Los modelos preentrenados se descargan y almacenan en caché localmente en: `~/.cache/huggingface/transformers/`. Este es el directorio predeterminado proporcionado por la variable de entorno de shell `TRANSFORMERS_CACHE`. En Windows, el directorio predeterminado es dado por `C:\Users\username\.cache\huggingface\transformers`. Puedes cambiar las variables de entorno de shell que se muestran a continuación, en orden de prioridad, para especificar un directorio de caché diferente: 1. Variable de entorno del shell (por defecto): `TRANSFORMERS_CACHE`. 2. Variable de entorno del shell:`HF_HOME` + `transformers/`. 3. Variable de entorno del shell: `XDG_CACHE_HOME` + `/huggingface/transformers`. <Tip> 🤗 Transformers usará las variables de entorno de shell `PYTORCH_TRANSFORMERS_CACHE` o `PYTORCH_PRETRAINED_BERT_CACHE` si viene de una iteración anterior de la biblioteca y ha configurado esas variables de entorno, a menos que especifiques la variable de entorno de shell `TRANSFORMERS_CACHE`. </Tip> ## Modo Offline 🤗 Transformers puede ejecutarse en un entorno con firewall o fuera de línea (offline) usando solo archivos locales. Configura la variable de entorno `TRANSFORMERS_OFFLINE=1` para habilitar este comportamiento. <Tip> Puedes añadir [🤗 Datasets](https://huggingface.co/docs/datasets/) al flujo de entrenamiento offline declarando la variable de entorno `HF_DATASETS_OFFLINE=1`. </Tip> Por ejemplo, normalmente ejecutarías un programa en una red normal con firewall para instancias externas con el siguiente comando: ```bash python examples/pytorch/translation/run_translation.py --model_name_or_path google-t5/t5-small --dataset_name wmt16 --dataset_config ro-en ... 
``` Ejecuta este mismo programa en una instancia offline con el siguiente comando: ```bash HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 \ python examples/pytorch/translation/run_translation.py --model_name_or_path google-t5/t5-small --dataset_name wmt16 --dataset_config ro-en ... ``` El script ahora debería ejecutarse sin bloquearse ni esperar a que se agote el tiempo de espera porque sabe que solo debe buscar archivos locales. ### Obtener modelos y tokenizers para uso offline Otra opción para usar 🤗 Transformers offline es descargando previamente los archivos y después apuntar al path local donde se encuentren. Hay tres maneras de hacer esto: * Descarga un archivo mediante la interfaz de usuario del [Model Hub](https://huggingface.co/models) haciendo click en el ícono ↓. ![download-icon](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/download-icon.png) * Utiliza el flujo de [`PreTrainedModel.from_pretrained`] y [`PreTrainedModel.save_pretrained`]: 1. Descarga previamente los archivos con [`PreTrainedModel.from_pretrained`]: ```py >>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM >>> tokenizer = AutoTokenizer.from_pretrained("bigscience/T0_3B") >>> model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0_3B") ``` 2. Guarda los archivos en un directorio específico con [`PreTrainedModel.save_pretrained`]: ```py >>> tokenizer.save_pretrained("./your/path/bigscience_t0") >>> model.save_pretrained("./your/path/bigscience_t0") ``` 3. Cuando te encuentres offline, recarga los archivos con [`PreTrainedModel.from_pretrained`] desde el directorio especificado: ```py >>> tokenizer = AutoTokenizer.from_pretrained("./your/path/bigscience_t0") >>> model = AutoModel.from_pretrained("./your/path/bigscience_t0") ``` * Descarga de manera programática los archivos con la biblioteca [huggingface_hub](https://github.com/huggingface/huggingface_hub/tree/main/src/huggingface_hub): 1. Instala la biblioteca [huggingface_hub](https://github.com/huggingface/huggingface_hub/tree/main/src/huggingface_hub) en tu entorno virtual: ```bash python -m pip install huggingface_hub ``` 2. Utiliza la función [`hf_hub_download`](https://huggingface.co/docs/hub/adding-a-library#download-files-from-the-hub) para descargar un archivo a un path específico. Por ejemplo, el siguiente comando descarga el archivo `config.json` del modelo [T0](https://huggingface.co/bigscience/T0_3B) al path deseado: ```py >>> from huggingface_hub import hf_hub_download >>> hf_hub_download(repo_id="bigscience/T0_3B", filename="config.json", cache_dir="./your/path/bigscience_t0") ``` Una vez que el archivo se descargue y se almacene en caché localmente, especifica tu ruta local para cargarlo y usarlo: ```py >>> from transformers import AutoConfig >>> config = AutoConfig.from_pretrained("./your/path/bigscience_t0/config.json") ``` <Tip> Para más detalles sobre cómo descargar archivos almacenados en el Hub consulta la sección [How to download files from the Hub](https://huggingface.co/docs/hub/how-to-downstream). </Tip>
transformers/docs/source/es/installation.md/0
{ "file_path": "transformers/docs/source/es/installation.md", "repo_id": "transformers", "token_count": 3639 }
271
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Subtítulos de Imágenes [[open-in-colab]] Los subtítulos de imágenes es la tarea de predecir un subtítulo para una imagen dada. Las aplicaciones comunes en el mundo real incluyen ayudar a personas con discapacidad visual que les puede ayudar a navegar a través de diferentes situaciones. Por lo tanto, los subtítulos de imágenes ayuda a mejorar la accesibilidad del contenido para las personas describiéndoles imágenes. Esta guía te mostrará cómo: * Ajustar un modelo de subtítulos de imágenes. * Usar el modelo ajustado para inferencia. Antes de comenzar, asegúrate de tener todas las bibliotecas necesarias instaladas: ```bash pip install transformers datasets evaluate -q pip install jiwer -q ``` Te animamos a que inicies sesión en tu cuenta de Hugging Face para que puedas subir y compartir tu modelo con la comunidad. Cuando se te solicite, ingresa tu token para iniciar sesión: ```python from huggingface_hub import notebook_login notebook_login() ``` ## Cargar el conjunto de datos de subtítulos BLIP de Pokémon Utiliza la biblioteca 🤗 Dataset para cargar un conjunto de datos que consiste en pares {image-caption}. Para crear tu propio conjunto de datos de subtítulos de imágenes en PyTorch, puedes seguir [este cuaderno](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/GIT/Fine_tune_GIT_on_an_image_captioning_dataset.ipynb). ```python from datasets import load_dataset ds = load_dataset("lambdalabs/pokemon-blip-captions") ds ``` ```bash DatasetDict({ train: Dataset({ features: ['image', 'text'], num_rows: 833 }) }) ``` El conjunto de datos tiene dos características, `image` y `text`. <Tip> Muchos conjuntos de datos de subtítulos de imágenes contienen múltiples subtítulos por imagen. En esos casos, una estrategia común es muestrear aleatoriamente un subtítulo entre los disponibles durante el entrenamiento. </Tip> Divide el conjunto de entrenamiento del conjunto de datos en un conjunto de entrenamiento y de prueba con el método [`~datasets.Dataset.train_test_split`]: ```python ds = ds["train"].train_test_split(test_size=0.1) train_ds = ds["train"] test_ds = ds["test"] ``` Vamos a visualizar un par de muestras del conjunto de entrenamiento. 
```python from textwrap import wrap import matplotlib.pyplot as plt import numpy as np def plot_images(images, captions): plt.figure(figsize=(20, 20)) for i in range(len(images)): ax = plt.subplot(1, len(images), i + 1) caption = captions[i] caption = "\n".join(wrap(caption, 12)) plt.title(caption) plt.imshow(images[i]) plt.axis("off") sample_images_to_visualize = [np.array(train_ds[i]["image"]) for i in range(5)] sample_captions = [train_ds[i]["text"] for i in range(5)] plot_images(sample_images_to_visualize, sample_captions) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/sample_training_images_image_cap.png" alt="Sample training images"/> </div> ## Preprocesar el conjunto de datos Dado que el conjunto de datos tiene dos modalidades (imagen y texto), el proceso de preprocesamiento preprocesará las imágenes y los subtítulos. Para hacerlo, carga la clase de procesador asociada con el modelo que estás a punto de ajustar. ```python from transformers import AutoProcessor checkpoint = "microsoft/git-base" processor = AutoProcessor.from_pretrained(checkpoint) ``` El procesador preprocesará internamente la imagen (lo que incluye el cambio de tamaño y la escala de píxeles) y tokenizará el subtítulo. ```python def transforms(example_batch): images = [x for x in example_batch["image"]] captions = [x for x in example_batch["text"]] inputs = processor(images=images, text=captions, padding="max_length") inputs.update({"labels": inputs["input_ids"]}) return inputs train_ds.set_transform(transforms) test_ds.set_transform(transforms) ``` Con el conjunto de datos listo, ahora puedes configurar el modelo para el ajuste fino. ## Cargar un modelo base Carga ["microsoft/git-base"](https://huggingface.co/microsoft/git-base) en un objeto [`AutoModelForCausalLM`](https://huggingface.co/docs/transformers/model_doc/auto#transformers.AutoModelForCausalLM). ```python from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained(checkpoint) ``` ## Evaluar Los modelos de subtítulos de imágenes se evalúan típicamente con el [Rouge Score](https://huggingface.co/spaces/evaluate-metric/rouge) o Tasa de Error de Palabra ([Word Error Rate](https://huggingface.co/spaces/evaluate-metric/wer), por sus siglas en inglés). Para esta guía, utilizarás la Tasa de Error de Palabra (WER). Usamos la biblioteca 🤗 Evaluate para hacerlo. Para conocer las limitaciones potenciales y otros problemas del WER, consulta [esta guía](https://huggingface.co/spaces/evaluate-metric/wer). ```python from evaluate import load import torch wer = load("wer") def compute_metrics(eval_pred): logits, labels = eval_pred predicted = logits.argmax(-1) decoded_labels = processor.batch_decode(labels, skip_special_tokens=True) decoded_predictions = processor.batch_decode(predicted, skip_special_tokens=True) wer_score = wer.compute(predictions=decoded_predictions, references=decoded_labels) return {"wer_score": wer_score} ``` ## ¡Entrenar! Ahora, estás listo para comenzar a ajustar el modelo. Utilizarás el 🤗 [`Trainer`] para esto. Primero, define los argumentos de entrenamiento usando [`TrainingArguments`]. 
```python from transformers import TrainingArguments, Trainer model_name = checkpoint.split("/")[1] training_args = TrainingArguments( output_dir=f"{model_name}-pokemon", learning_rate=5e-5, num_train_epochs=50, fp16=True, per_device_train_batch_size=32, per_device_eval_batch_size=32, gradient_accumulation_steps=2, save_total_limit=3, evaluation_strategy="steps", eval_steps=50, save_strategy="steps", save_steps=50, logging_steps=50, remove_unused_columns=False, push_to_hub=True, label_names=["labels"], load_best_model_at_end=True, ) ``` Luego pásalos junto con los conjuntos de datos y el modelo al 🤗 Trainer. ```python trainer = Trainer( model=model, args=training_args, train_dataset=train_ds, eval_dataset=test_ds, compute_metrics=compute_metrics, ) ``` Para comenzar el entrenamiento, simplemente llama a [`~Trainer.train`] en el objeto [`Trainer`]. ```python trainer.train() ``` Deberías ver cómo disminuye suavemente la pérdida de entrenamiento a medida que avanza el entrenamiento. Una vez completado el entrenamiento, comparte tu modelo en el Hub con el método [`~Trainer.push_to_hub`] para que todos puedan usar tu modelo: ```python trainer.push_to_hub() ``` ## Inferencia Toma una imagen de muestra de test_ds para probar el modelo. ```python from PIL import Image import requests url = "https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/pokemon.png" image = Image.open(requests.get(url, stream=True).raw) image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/test_image_image_cap.png" alt="Test image"/> </div> Prepara la imagen para el modelo. ```python device = "cuda" if torch.cuda.is_available() else "cpu" inputs = processor(images=image, return_tensors="pt").to(device) pixel_values = inputs.pixel_values ``` Llama a [`generate`] y decodifica las predicciones. ```python generated_ids = model.generate(pixel_values=pixel_values, max_length=50) generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] print(generated_caption) ``` ```bash a drawing of a pink and blue pokemon ``` ¡Parece que el modelo ajustado generó un subtítulo bastante bueno!
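Como alternativa, una vez que el modelo ajustado se ha subido al Hub, también puedes usar un [`pipeline`] de `image-to-text` para generar subtítulos. El identificador del repositorio que se muestra a continuación es solo un ejemplo; sustitúyelo por el nombre con el que publicaste tu modelo:

```python
from transformers import pipeline

# "tu-usuario/git-base-pokemon" es un identificador hipotético; usa el de tu propio repositorio en el Hub
captioner = pipeline("image-to-text", model="tu-usuario/git-base-pokemon")
captioner("https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/pokemon.png")
```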
transformers/docs/source/es/tasks/image_captioning.md/0
{ "file_path": "transformers/docs/source/es/tasks/image_captioning.md", "repo_id": "transformers", "token_count": 3231 }
272
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Visite rapide [[open-in-colab]] Soyez opérationnel avec 🤗 Transformers ! Que vous soyez un développeur ou un utilisateur lambda, cette visite rapide vous aidera à démarrer et vous montrera comment utiliser le [`pipeline`] pour l'inférence, charger un modèle pré-entraîné et un préprocesseur avec une [AutoClass](./model_doc/auto), et entraîner rapidement un modèle avec PyTorch ou TensorFlow. Si vous êtes un débutant, nous vous recommandons de consulter nos tutoriels ou notre [cours](https://huggingface.co/course/chapter1/1) suivant pour des explications plus approfondies des concepts présentés ici. Avant de commencer, assurez-vous que vous avez installé toutes les bibliothèques nécessaires : ```bash !pip install transformers datasets ``` Vous aurez aussi besoin d'installer votre bibliothèque d'apprentissage profond favorite : <frameworkcontent> <pt> ```bash pip install torch ``` </pt> <tf> ```bash pip install tensorflow ``` </tf> </frameworkcontent> ## Pipeline <Youtube id="tiZFewofSLM"/> Le [`pipeline`] est le moyen le plus simple d'utiliser un modèle pré-entraîné pour l'inférence. Vous pouvez utiliser le [`pipeline`] prêt à l'emploi pour de nombreuses tâches dans différentes modalités. Consultez le tableau ci-dessous pour connaître les tâches prises en charge : | **Tâche** | **Description** | **Modalité** | **Identifiant du pipeline** | |------------------------------|--------------------------------------------------------------------------------------------------------------|----------------------|-----------------------------------------------| | Classification de texte | Attribue une catégorie à une séquence de texte donnée | Texte | pipeline(task="sentiment-analysis") | | Génération de texte | Génère du texte à partir d'une consigne donnée | Texte | pipeline(task="text-generation") | | Reconnaissance de token nommé | Attribue une catégorie à chaque token dans une séquence (personnes, organisation, localisation, etc.) 
| Texte | pipeline(task="ner") | | Question réponse | Extrait une réponse du texte en fonction du contexte et d'une question | Texte | pipeline(task="question-answering") | | Prédiction de token masqué | Prédit correctement le token masqué dans une séquence | Texte | pipeline(task="fill-mask") | | Génération de résumé | Génère un résumé d'une séquence de texte donnée ou d'un document | Texte | pipeline(task="summarization") | | Traduction | Traduit du texte d'un langage à un autre | Texte | pipeline(task="translation") | | Classification d'image | Attribue une catégorie à une image | Image | pipeline(task="image-classification") | | Segmentation d'image | Attribue une catégorie à chaque pixel d'une image (supporte la segmentation sémantique, panoptique et d'instance) | Image | pipeline(task="image-segmentation") | | Détection d'objets | Prédit les délimitations et catégories d'objets dans une image | Image | pipeline(task="object-detection") | | Classification d'audio | Attribue une catégorie à un fichier audio | Audio | pipeline(task="audio-classification") | | Reconnaissance automatique de la parole | Extrait le discours d'un fichier audio en texte | Audio | pipeline(task="automatic-speech-recognition") | | Question réponse visuels | Etant données une image et une question, répond correctement à une question sur l'image | Modalités multiples | pipeline(task="vqa") | Commencez par créer une instance de [`pipeline`] et spécifiez la tâche pour laquelle vous souhaitez l'utiliser. Vous pouvez utiliser le [`pipeline`] pour n'importe laquelle des tâches mentionnées dans le tableau précédent. Pour obtenir une liste complète des tâches prises en charge, consultez la documentation de l'[API pipeline](./main_classes/pipelines). Dans ce guide, nous utiliserons le [`pipeline`] pour l'analyse des sentiments à titre d'exemple : ```py >>> from transformers import pipeline >>> classifier = pipeline("sentiment-analysis") ``` Le [`pipeline`] télécharge et stocke en cache un [modèle pré-entraîné](https://huggingface.co/distilbert/distilbert-base-uncased-finetuned-sst-2-english) et un tokenizer par défaut pour l'analyse des sentiments. Vous pouvez maintenant utiliser le `classifier` sur le texte de votre choix : ```py >>> classifier("We are very happy to show you the 🤗 Transformers library.") [{'label': 'POSITIVE', 'score': 0.9998}] ``` Si vous voulez classifier plus qu'un texte, donnez une liste de textes au [`pipeline`] pour obtenir une liste de dictionnaires en retour : ```py >>> results = classifier(["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."]) >>> for result in results: ... print(f"label: {result['label']}, avec le score de: {round(result['score'], 4)}") label: POSITIVE, avec le score de: 0.9998 label: NEGATIVE, avec le score de: 0.5309 ``` Le [`pipeline`] peut aussi itérer sur un jeu de données entier pour n'importe quelle tâche. Prenons par exemple la reconnaissance automatique de la parole : ```py >>> import torch >>> from transformers import pipeline >>> speech_recognizer = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h") ``` Chargez un jeu de données audio (voir le 🤗 Datasets [Quick Start](https://huggingface.co/docs/datasets/quickstart#audio) pour plus de détails) sur lequel vous souhaitez itérer. 
Pour cet exemple, nous chargeons le jeu de données [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) : ```py >>> from datasets import load_dataset, Audio >>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train") # doctest: +IGNORE_RESULT ``` Vous devez vous assurer que le taux d'échantillonnage de l'ensemble de données correspond au taux d'échantillonnage sur lequel [`facebook/wav2vec2-base-960h`](https://huggingface.co/facebook/wav2vec2-base-960h) a été entraîné : ```py >>> dataset = dataset.cast_column("audio", Audio(sampling_rate=speech_recognizer.feature_extractor.sampling_rate)) ``` Les fichiers audio sont automatiquement chargés et rééchantillonnés lors de l'appel de la colonne `"audio"`. Extrayez les tableaux de formes d'ondes brutes des quatre premiers échantillons et passez-les comme une liste au pipeline : ```py >>> result = speech_recognizer(dataset[:4]["audio"]) >>> print([d["text"] for d in result]) ['I WOULD LIKE TO SET UP A JOINT ACCOUNT WITH MY PARTNER HOW DO I PROCEED WITH DOING THAT', "FODING HOW I'D SET UP A JOIN TO HET WITH MY WIFE AND WHERE THE AP MIGHT BE", "I I'D LIKE TOY SET UP A JOINT ACCOUNT WITH MY PARTNER I'M NOT SEEING THE OPTION TO DO IT ON THE AP SO I CALLED IN TO GET SOME HELP CAN I JUST DO IT OVER THE PHONE WITH YOU AND GIVE YOU THE INFORMATION OR SHOULD I DO IT IN THE AP AND I'M MISSING SOMETHING UQUETTE HAD PREFERRED TO JUST DO IT OVER THE PHONE OF POSSIBLE THINGS", 'HOW DO I THURN A JOIN A COUNT'] ``` Pour les ensembles de données plus importants où les entrées sont volumineuses (comme dans les domaines de la parole ou de la vision), utilisez plutôt un générateur au lieu d'une liste pour charger toutes les entrées en mémoire. Pour plus d'informations, consultez la documentation de l'[API pipeline](./main_classes/pipelines). ### Utiliser une autre modèle et tokenizer dans le pipeline Le [`pipeline`] peut être utilisé avec n'importe quel modèle du [Hub](https://huggingface.co/models), ce qui permet d'adapter facilement le [`pipeline`] à d'autres cas d'utilisation. Par exemple, si vous souhaitez un modèle capable de traiter du texte français, utilisez les filtres du Hub pour trouver un modèle approprié. 
Le premier résultat renvoie un [modèle BERT](https://huggingface.co/nlptown/bert-base-multilingual-uncased-sentiment) multilingue finetuné pour l'analyse des sentiments que vous pouvez utiliser pour le texte français : ```py >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" ``` <frameworkcontent> <pt> Utilisez [`AutoModelForSequenceClassification`] et [`AutoTokenizer`] pour charger le modèle pré-entraîné et le tokenizer adapté (plus de détails sur une `AutoClass` dans la section suivante) : ```py >>> from transformers import AutoTokenizer, AutoModelForSequenceClassification >>> model = AutoModelForSequenceClassification.from_pretrained(model_name) >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` </pt> <tf> Utilisez [`TFAutoModelForSequenceClassification`] et [`AutoTokenizer`] pour charger le modèle pré-entraîné et le tokenizer adapté (plus de détails sur une `TFAutoClass` dans la section suivante) : ```py >>> from transformers import AutoTokenizer, TFAutoModelForSequenceClassification >>> model = TFAutoModelForSequenceClassification.from_pretrained(model_name) >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` </tf> </frameworkcontent> Spécifiez le modèle et le tokenizer dans le [`pipeline`], et utilisez le `classifier` sur le texte en français : ```py >>> classifier = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer) >>> classifier("Nous sommes très heureux de vous présenter la bibliothèque 🤗 Transformers.") [{'label': '5 stars', 'score': 0.7273}] ``` Si vous ne parvenez pas à trouver un modèle adapté à votre cas d'utilisation, vous devrez finetuner un modèle pré-entraîné sur vos données. Jetez un coup d'œil à notre [tutoriel sur le finetuning](./training) pour apprendre comment faire. Enfin, après avoir finetuné votre modèle pré-entraîné, pensez à [partager](./model_sharing) le modèle avec la communauté sur le Hub afin de démocratiser l'apprentissage automatique pour tous ! 🤗 ## AutoClass <Youtube id="AhChOFRegn4"/> Les classes [`AutoModelForSequenceClassification`] et [`AutoTokenizer`] fonctionnent ensemble pour créer un [`pipeline`] comme celui que vous avez utilisé ci-dessus. Une [AutoClass](./model_doc/auto) est un raccourci qui récupère automatiquement l'architecture d'un modèle pré-entraîné à partir de son nom ou de son emplacement. Il vous suffit de sélectionner l'`AutoClass` appropriée à votre tâche et la classe de prétraitement qui lui est associée. Reprenons l'exemple de la section précédente et voyons comment vous pouvez utiliser l'`AutoClass` pour reproduire les résultats du [`pipeline`]. ### AutoTokenizer Un tokenizer est chargé de prétraiter le texte pour en faire un tableau de chiffres qui servira d'entrée à un modèle. De nombreuses règles régissent le processus de tokenisation, notamment la manière de diviser un mot et le niveau auquel les mots doivent être divisés (pour en savoir plus sur la tokenisation, consultez le [résumé](./tokenizer_summary)). La chose la plus importante à retenir est que vous devez instancier un tokenizer avec le même nom de modèle pour vous assurer que vous utilisez les mêmes règles de tokenisation que celles avec lesquelles un modèle a été pré-entraîné. 
Chargez un tokenizer avec [`AutoTokenizer`] : ```py >>> from transformers import AutoTokenizer >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` Passez votre texte au tokenizer : ```py >>> encoding = tokenizer("We are very happy to show you the 🤗 Transformers library.") >>> print(encoding) {'input_ids': [101, 11312, 10320, 12495, 19308, 10114, 11391, 10855, 10103, 100, 58263, 13299, 119, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} ``` Le tokenizer retourne un dictionnaire contenant : * [input_ids](./glossary#input-ids): la représentation numérique des tokens. * [attention_mask](.glossary#attention-mask): indique quels tokens doivent faire l'objet d'une attention particulière (plus particulièrement les tokens de remplissage). Un tokenizer peut également accepter une liste de textes, et remplir et tronquer le texte pour retourner un échantillon de longueur uniforme : <frameworkcontent> <pt> ```py >>> pt_batch = tokenizer( ... ["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."], ... padding=True, ... truncation=True, ... max_length=512, ... return_tensors="pt", ... ) ``` </pt> <tf> ```py >>> tf_batch = tokenizer( ... ["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."], ... padding=True, ... truncation=True, ... max_length=512, ... return_tensors="tf", ... ) ``` </tf> </frameworkcontent> <Tip> Consultez le tutoriel [prétraitement](./preprocessing) pour plus de détails sur la tokenisation, et sur la manière d'utiliser un [`AutoImageProcessor`], un [`AutoFeatureExtractor`] et un [`AutoProcessor`] pour prétraiter les images, l'audio et les contenus multimodaux. </Tip> ### AutoModel <frameworkcontent> <pt> 🤗 Transformers fournit un moyen simple et unifié de charger des instances pré-entraînées. Cela signifie que vous pouvez charger un [`AutoModel`] comme vous chargeriez un [`AutoTokenizer`]. La seule différence est de sélectionner l'[`AutoModel`] approprié pour la tâche. Pour une classification de texte (ou de séquence de textes), vous devez charger [`AutoModelForSequenceClassification`] : ```py >>> from transformers import AutoModelForSequenceClassification >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> pt_model = AutoModelForSequenceClassification.from_pretrained(model_name) ``` <Tip> Voir le [résumé de la tâche](./task_summary) pour vérifier si elle est prise en charge par une classe [`AutoModel`]. </Tip> Maintenant, passez votre échantillon d'entrées prétraitées directement au modèle. Il vous suffit de décompresser le dictionnaire en ajoutant `**` : ```py >>> pt_outputs = pt_model(**pt_batch) ``` Le modèle produit les activations finales dans l'attribut `logits`. Appliquez la fonction softmax aux `logits` pour récupérer les probabilités : ```py >>> from torch import nn >>> pt_predictions = nn.functional.softmax(pt_outputs.logits, dim=-1) >>> print(pt_predictions) tensor([[0.0021, 0.0018, 0.0115, 0.2121, 0.7725], [0.2084, 0.1826, 0.1969, 0.1755, 0.2365]], grad_fn=<SoftmaxBackward0>) ``` </pt> <tf> 🤗 Transformers fournit un moyen simple et unifié de charger des instances pré-entraînés. Cela signifie que vous pouvez charger un [`TFAutoModel`] comme vous chargeriez un [`AutoTokenizer`]. La seule différence est de sélectionner le [`TFAutoModel`] approprié pour la tâche. 
Pour une classification de texte (ou de séquence de textes), vous devez charger [`TFAutoModelForSequenceClassification`] : ```py >>> from transformers import TFAutoModelForSequenceClassification >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(model_name) ``` <Tip> Voir le [résumé de la tâche](./task_summary) pour vérifier si elle est prise en charge par une classe [`AutoModel`]. </Tip> Passez maintenant votre échantillon d'entrées prétraitées directement au modèle en passant les clés du dictionnaire directement aux tensors : ```py >>> tf_outputs = tf_model(tf_batch) ``` Le modèle produit les activations finales dans l'attribut `logits`. Appliquez la fonction softmax aux `logits` pour récupérer les probabilités : ```py >>> import tensorflow as tf >>> tf_predictions = tf.nn.softmax(tf_outputs.logits, axis=-1) >>> tf_predictions # doctest: +IGNORE_RESULT ``` </tf> </frameworkcontent> <Tip> Tous les modèles 🤗 Transformers (PyTorch ou TensorFlow) produisent les tensors *avant* la fonction d'activation finale (comme softmax) car la fonction d'activation finale est souvent fusionnée avec le calcul de la perte. Les structures produites par le modèle sont des classes de données spéciales, de sorte que leurs attributs sont autocomplétés dans un environnement de développement. Les structures produites par le modèle se comportent comme un tuple ou un dictionnaire (vous pouvez les indexer avec un entier, une tranche ou une chaîne), auquel cas les attributs qui sont None sont ignorés. </Tip> ### Sauvegarder un modèle <frameworkcontent> <pt> Une fois que votre modèle est finetuné, vous pouvez le sauvegarder avec son tokenizer en utilisant [`PreTrainedModel.save_pretrained`] : ```py >>> pt_save_directory = "./pt_save_pretrained" >>> tokenizer.save_pretrained(pt_save_directory) # doctest: +IGNORE_RESULT >>> pt_model.save_pretrained(pt_save_directory) ``` Lorsque vous voulez réutiliser le modèle, rechargez-le avec [`PreTrainedModel.from_pretrained`] : ```py >>> pt_model = AutoModelForSequenceClassification.from_pretrained("./pt_save_pretrained") ``` </pt> <tf> Une fois que votre modèle est finetuné, vous pouvez le sauvegarder avec son tokenizer en utilisant [`TFPreTrainedModel.save_pretrained`] : ```py >>> tf_save_directory = "./tf_save_pretrained" >>> tokenizer.save_pretrained(tf_save_directory) # doctest: +IGNORE_RESULT >>> tf_model.save_pretrained(tf_save_directory) ``` Lorsque vous voulez réutiliser le modèle, rechargez-le avec [`TFPreTrainedModel.from_pretrained`] : ```py >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained("./tf_save_pretrained") ``` </tf> </frameworkcontent> Une fonctionnalité particulièrement cool 🤗 Transformers est la possibilité d'enregistrer un modèle et de le recharger en tant que modèle PyTorch ou TensorFlow. 
Le paramètre `from_pt` ou `from_tf` permet de convertir le modèle d'un framework à l'autre : <frameworkcontent> <pt> ```py >>> from transformers import AutoModel >>> tokenizer = AutoTokenizer.from_pretrained(tf_save_directory) >>> pt_model = AutoModelForSequenceClassification.from_pretrained(tf_save_directory, from_tf=True) ``` </pt> <tf> ```py >>> from transformers import TFAutoModel >>> tokenizer = AutoTokenizer.from_pretrained(pt_save_directory) >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(pt_save_directory, from_pt=True) ``` </tf> </frameworkcontent> ## Constructions de modèles personnalisés Vous pouvez modifier la configuration du modèle pour changer la façon dont un modèle est construit. La configuration spécifie les attributs d'un modèle, tels que le nombre de couches ou de têtes d'attention. Vous partez de zéro lorsque vous initialisez un modèle à partir d'une configuration personnalisée. Les attributs du modèle sont initialisés de manière aléatoire et vous devrez entraîner le modèle avant de pouvoir l'utiliser pour obtenir des résultats significatifs. Commencez par importer [`AutoConfig`], puis chargez le modèle pré-entraîné que vous voulez modifier. Dans [`AutoConfig.from_pretrained`], vous pouvez spécifier l'attribut que vous souhaitez modifier, tel que le nombre de têtes d'attention : ```py >>> from transformers import AutoConfig >>> my_config = AutoConfig.from_pretrained("distilbert/distilbert-base-uncased", n_heads=12) ``` <frameworkcontent> <pt> Créez un modèle personnalisé à partir de votre configuration avec [`AutoModel.from_config`] : ```py >>> from transformers import AutoModel >>> my_model = AutoModel.from_config(my_config) ``` </pt> <tf> Créez un modèle personnalisé à partir de votre configuration avec [`TFAutoModel.from_config`] : ```py >>> from transformers import TFAutoModel >>> my_model = TFAutoModel.from_config(my_config) ``` </tf> </frameworkcontent> Consultez le guide [Créer une architecture personnalisée](./create_a_model) pour plus d'informations sur la création de configurations personnalisées. ## Trainer - une boucle d'entraînement optimisée par PyTorch Tous les modèles sont des [`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) standard, vous pouvez donc les utiliser dans n'importe quelle boucle d'entraînement typique. Bien que vous puissiez écrire votre propre boucle d'entraînement, 🤗 Transformers fournit une classe [`Trainer`] pour PyTorch, qui contient la boucle d'entraînement de base et ajoute des fonctionnalités supplémentaires comme l'entraînement distribué, la précision mixte, et plus encore. En fonction de votre tâche, vous passerez généralement les paramètres suivants à [`Trainer`] : 1. Un [`PreTrainedModel`] ou un [`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module): ```py >>> from transformers import AutoModelForSequenceClassification >>> model = AutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` 2. [`TrainingArguments`] contient les hyperparamètres du modèle que vous pouvez changer comme le taux d'apprentissage, la taille de l'échantillon, et le nombre d'époques pour s'entraîner. Les valeurs par défaut sont utilisées si vous ne spécifiez pas d'hyperparamètres d'apprentissage : ```py >>> from transformers import TrainingArguments >>> training_args = TrainingArguments( ... output_dir="path/to/save/folder/", ... learning_rate=2e-5, ... per_device_train_batch_size=8, ... per_device_eval_batch_size=8, ... num_train_epochs=2, ... ) ``` 3. 
Une classe de prétraitement comme un tokenizer, un processeur d'images ou un extracteur de caractéristiques : ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased") ``` 4. Chargez un jeu de données : ```py >>> from datasets import load_dataset >>> dataset = load_dataset("rotten_tomatoes") # doctest: +IGNORE_RESULT ``` 5. Créez une fonction qui transforme le texte du jeu de données en token : ```py >>> def tokenize_dataset(dataset): ... return tokenizer(dataset["text"]) ``` Puis appliquez-la à l'intégralité du jeu de données avec [`~datasets.Dataset.map`]: ```py >>> dataset = dataset.map(tokenize_dataset, batched=True) ``` 6. Un [`DataCollatorWithPadding`] pour créer un échantillon d'exemples à partir de votre jeu de données : ```py >>> from transformers import DataCollatorWithPadding >>> data_collator = DataCollatorWithPadding(tokenizer=tokenizer) ``` Maintenant, rassemblez tous ces éléments dans un [`Trainer`] : ```py >>> from transformers import Trainer >>> trainer = Trainer( ... model=model, ... args=training_args, ... train_dataset=dataset["train"], ... eval_dataset=dataset["test"], ... tokenizer=tokenizer, ... data_collator=data_collator, ... ) # doctest: +SKIP ``` Une fois que vous êtes prêt, appelez la fonction [`~Trainer.train`] pour commencer l'entraînement : ```py >>> trainer.train() # doctest: +SKIP ``` <Tip> Pour les tâches - comme la traduction ou la génération de résumé - qui utilisent un modèle séquence à séquence, utilisez plutôt les classes [`Seq2SeqTrainer`] et [`Seq2SeqTrainingArguments`]. </Tip> Vous pouvez personnaliser le comportement de la boucle d'apprentissage en redéfinissant les méthodes à l'intérieur de [`Trainer`]. Cela vous permet de personnaliser des caractéristiques telles que la fonction de perte, l'optimiseur et le planificateur. Consultez la documentation de [`Trainer`] pour savoir quelles méthodes peuvent être redéfinies. L'autre moyen de personnaliser la boucle d'apprentissage est d'utiliser les [Callbacks](./main_classes/callbacks). Vous pouvez utiliser les callbacks pour intégrer d'autres bibliothèques et inspecter la boucle d'apprentissage afin de suivre la progression ou d'arrêter l'apprentissage plus tôt. Les callbacks ne modifient rien dans la boucle d'apprentissage elle-même. Pour personnaliser quelque chose comme la fonction de perte, vous devez redéfinir le [`Trainer`] à la place. ## Entraînement avec TensorFlow Tous les modèles sont des modèles standard [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) afin qu'ils puissent être entraînés avec TensorFlow avec l'API [Keras](https://keras.io/). 🤗 Transformers fournit la fonction [`~TFPreTrainedModel.prepare_tf_dataset`] pour charger facilement votre jeu de données comme un `tf.data.Dataset` afin que vous puissiez commencer l'entraînement immédiatement avec les fonctions [`compile`](https://keras.io/api/models/model_training_apis/#compile-method) et [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) de Keras. 1. Vous commencez avec un modèle [`TFPreTrainedModel`] ou [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) : ```py >>> from transformers import TFAutoModelForSequenceClassification >>> model = TFAutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` 2. 
Une classe de prétraitement comme un tokenizer, un processeur d'images ou un extracteur de caractéristiques : ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased") ``` 3. Créez une fonction qui transforme le texte du jeu de données en token : ```py >>> def tokenize_dataset(dataset): ... return tokenizer(dataset["text"]) # doctest: +SKIP ``` 4. Appliquez le tokenizer à l'ensemble du jeu de données avec [`~datasets.Dataset.map`] et passez ensuite le jeu de données et le tokenizer à [`~TFPreTrainedModel.prepare_tf_dataset`]. Vous pouvez également modifier la taille de l'échantillon et mélanger le jeu de données ici si vous le souhaitez : ```py >>> dataset = dataset.map(tokenize_dataset) # doctest: +SKIP >>> tf_dataset = model.prepare_tf_dataset( ... dataset, batch_size=16, shuffle=True, tokenizer=tokenizer ... ) # doctest: +SKIP ``` 5. Une fois que vous êtes prêt, appelez les fonctions `compile` et `fit` pour commencer l'entraînement : ```py >>> from tensorflow.keras.optimizers import Adam >>> model.compile(optimizer=Adam(3e-5)) >>> model.fit(tf_dataset) # doctest: +SKIP ``` ## Et après ? Maintenant que vous avez terminé la visite rapide de 🤗 Transformers, consultez nos guides et apprenez à faire des choses plus spécifiques comme créer un modèle personnalisé, finetuner un modèle pour une tâche, et comment entraîner un modèle avec un script. Si vous souhaitez en savoir plus sur les concepts fondamentaux de 🤗 Transformers, jetez un œil à nos guides conceptuels !
transformers/docs/source/fr/quicktour.md/0
{ "file_path": "transformers/docs/source/fr/quicktour.md", "repo_id": "transformers", "token_count": 10739 }
273
<!--- Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Installazione Installa 🤗 Transformers per qualsiasi libreria di deep learning con cui stai lavorando, imposta la tua cache, e opzionalmente configura 🤗 Transformers per l'esecuzione offline. 🤗 Transformers è testato su Python 3.6+, PyTorch 1.1.0+, TensorFlow 2.0+, e Flax. Segui le istruzioni di installazione seguenti per la libreria di deep learning che stai utilizzando: * [PyTorch](https://pytorch.org/get-started/locally/) istruzioni di installazione. * [TensorFlow 2.0](https://www.tensorflow.org/install/pip) istruzioni di installazione. * [Flax](https://flax.readthedocs.io/en/latest/) istruzioni di installazione. ## Installazione con pip Puoi installare 🤗 Transformers in un [ambiente virtuale](https://docs.python.org/3/library/venv.html). Se non sei familiare con gli ambienti virtuali in Python, dai un'occhiata a questa [guida](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/). Un ambiente virtuale rende più semplice la gestione di progetti differenti, evitando problemi di compatibilità tra dipendenze. Inizia creando un ambiente virtuale nella directory del tuo progetto: ```bash python -m venv .env ``` Attiva l'ambiente virtuale: ```bash source .env/bin/activate ``` Ora puoi procedere con l'installazione di 🤗 Transformers eseguendo il comando seguente: ```bash pip install transformers ``` Per il solo supporto della CPU, puoi installare facilmente 🤗 Transformers e una libreria di deep learning in solo una riga. Ad esempio, installiamo 🤗 Transformers e PyTorch con: ```bash pip install transformers[torch] ``` 🤗 Transformers e TensorFlow 2.0: ```bash pip install transformers[tf-cpu] ``` 🤗 Transformers e Flax: ```bash pip install transformers[flax] ``` Infine, verifica se 🤗 Transformers è stato installato in modo appropriato eseguendo il seguente comando. Questo scaricherà un modello pre-allenato: ```bash python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('we love you'))" ``` Dopodiché stampa l'etichetta e il punteggio: ```bash [{'label': 'POSITIVE', 'score': 0.9998704791069031}] ``` ## Installazione dalla fonte Installa 🤗 Transformers dalla fonte con il seguente comando: ```bash pip install git+https://github.com/huggingface/transformers ``` Questo comando installa la versione `main` più attuale invece dell'ultima versione stabile. Questo è utile per stare al passo con gli ultimi sviluppi. Ad esempio, se un bug è stato sistemato da quando è uscita l'ultima versione ufficiale ma non è stata ancora rilasciata una nuova versione. Tuttavia, questo significa che questa versione `main` può non essere sempre stabile. Ci sforziamo per mantenere la versione `main` operativa, e la maggior parte dei problemi viene risolta in poche ore o in un giorno. 
Se riscontri un problema, per favore apri una [Issue](https://github.com/huggingface/transformers/issues) così possiamo sistemarlo ancora più velocemente! Controlla se 🤗 Transformers è stata installata in modo appropriato con il seguente comando: ```bash python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('I love you'))" ``` ## Installazione modificabile Hai bisogno di un'installazione modificabile se vuoi: * Usare la versione `main` del codice dalla fonte. * Contribuire a 🤗 Transformers e hai bisogno di testare i cambiamenti nel codice. Clona il repository e installa 🤗 Transformers con i seguenti comandi: ```bash git clone https://github.com/huggingface/transformers.git cd transformers pip install -e . ``` Questi comandi collegheranno la cartella in cui è stato clonato il repository e i path delle librerie Python. Python guarderà ora all'interno della cartella clonata, oltre ai normali path delle librerie. Per esempio, se i tuoi pacchetti Python sono installati tipicamente in `~/anaconda3/envs/main/lib/python3.7/site-packages/`, Python cercherà anche nella cartella clonata: `~/transformers/`. <Tip warning={true}> Devi tenere la cartella `transformers` se vuoi continuare ad utilizzare la libreria. </Tip> Ora puoi facilmente aggiornare il tuo clone all'ultima versione di 🤗 Transformers con il seguente comando: ```bash cd ~/transformers/ git pull ``` Il tuo ambiente Python troverà la versione `main` di 🤗 Transformers alla prossima esecuzione. ## Installazione con conda Installazione dal canale conda `conda-forge`: ```bash conda install conda-forge::transformers ``` ## Impostazione della cache I modelli pre-allenati sono scaricati e memorizzati localmente nella cache in: `~/.cache/huggingface/transformers/`. Questa è la directory di default data dalla variabile d'ambiente della shell `TRANSFORMERS_CACHE`. Su Windows, la directory di default è data da `C:\Users\username\.cache\huggingface\transformers`. Puoi cambiare le variabili d'ambiente della shell indicate in seguito, in ordine di priorità, per specificare una directory differente per la cache: 1. Variabile d'ambiente della shell (default): `TRANSFORMERS_CACHE`. 2. Variabile d'ambiente della shell: `HF_HOME` + `transformers/`. 3. Variabile d'ambiente della shell: `XDG_CACHE_HOME` + `/huggingface/transformers`. <Tip> 🤗 Transformers utilizzerà le variabili d'ambiente della shell `PYTORCH_TRANSFORMERS_CACHE` o `PYTORCH_PRETRAINED_BERT_CACHE` se si proviene da un'iterazione precedente di questa libreria e sono state impostate queste variabili d'ambiente, a meno che non si specifichi la variabile d'ambiente della shell `TRANSFORMERS_CACHE`. </Tip> ## Modalità Offline 🤗 Transformers può essere eseguita in un ambiente firewalled o offline utilizzando solo file locali. Imposta la variabile d'ambiente `TRANSFORMERS_OFFLINE=1` per abilitare questo comportamento. <Tip> Aggiungi [🤗 Datasets](https://huggingface.co/docs/datasets/) al tuo flusso di lavoro offline di training impostando la variabile d'ambiente `HF_DATASETS_OFFLINE=1`. </Tip> Ad esempio, in genere si esegue un programma su una rete normale, protetta da firewall per le istanze esterne, con il seguente comando: ```bash python examples/pytorch/translation/run_translation.py --model_name_or_path google-t5/t5-small --dataset_name wmt16 --dataset_config ro-en ... 
``` Esegui lo stesso programma in un'istanza offline con: ```bash HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 \ python examples/pytorch/translation/run_translation.py --model_name_or_path google-t5/t5-small --dataset_name wmt16 --dataset_config ro-en ... ``` Lo script viene ora eseguito senza bloccarsi o attendere il timeout, perché sa di dover cercare solo file locali. ### Ottenere modelli e tokenizer per l'uso offline Un'altra opzione per utilizzare offline 🤗 Transformers è scaricare i file in anticipo, e poi puntare al loro path locale quando hai la necessità di utilizzarli offline. Ci sono tre modi per fare questo: * Scarica un file tramite l'interfaccia utente sul [Model Hub](https://huggingface.co/models) premendo sull'icona ↓. ![download-icon](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/download-icon.png) * Utilizza il flusso [`PreTrainedModel.from_pretrained`] e [`PreTrainedModel.save_pretrained`]: 1. Scarica i tuoi file in anticipo con [`PreTrainedModel.from_pretrained`]: ```py >>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM >>> tokenizer = AutoTokenizer.from_pretrained("bigscience/T0_3B") >>> model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0_3B") ``` 2. Salva i tuoi file in una directory specificata con [`PreTrainedModel.save_pretrained`]: ```py >>> tokenizer.save_pretrained("./il/tuo/path/bigscience_t0") >>> model.save_pretrained("./il/tuo/path/bigscience_t0") ``` 3. Ora quando sei offline, carica i tuoi file con [`PreTrainedModel.from_pretrained`] dalla directory specificata: ```py >>> tokenizer = AutoTokenizer.from_pretrained("./il/tuo/path/bigscience_t0") >>> model = AutoModel.from_pretrained("./il/tuo/path/bigscience_t0") ``` * Scarica in maniera programmatica i file con la libreria [huggingface_hub](https://github.com/huggingface/huggingface_hub/tree/main/src/huggingface_hub): 1. Installa la libreria `huggingface_hub` nel tuo ambiente virtuale: ```bash python -m pip install huggingface_hub ``` 2. Utilizza la funzione [`hf_hub_download`](https://huggingface.co/docs/hub/adding-a-library#download-files-from-the-hub) per scaricare un file in un path specifico. Per esempio, il seguente comando scarica il file `config.json` dal modello [T0](https://huggingface.co/bigscience/T0_3B) nel path che desideri: ```py >>> from huggingface_hub import hf_hub_download >>> hf_hub_download(repo_id="bigscience/T0_3B", filename="config.json", cache_dir="./il/tuo/path/bigscience_t0") ``` Una volta che il tuo file è scaricato e salvato in cache localmente, specifica il suo path locale per caricarlo e utilizzarlo: ```py >>> from transformers import AutoConfig >>> config = AutoConfig.from_pretrained("./il/tuo/path/bigscience_t0/config.json") ``` <Tip> Fai riferimento alla sezione [How to download files from the Hub](https://huggingface.co/docs/hub/how-to-downstream) per avere maggiori dettagli su come scaricare modelli presenti sull Hub. </Tip>
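Un'ultima nota (esempio puramente indicativo): se i file del modello sono già presenti nella cache locale, puoi anche passare il parametro `local_files_only=True` a `from_pretrained`, così 🤗 Transformers utilizzerà solo i file locali senza tentare alcuna connessione di rete:

```py
>>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

>>> # Carica esclusivamente dalla cache locale: fallisce se i file non sono già stati scaricati.
>>> tokenizer = AutoTokenizer.from_pretrained("bigscience/T0_3B", local_files_only=True)
>>> model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0_3B", local_files_only=True)
```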
transformers/docs/source/it/installation.md/0
{ "file_path": "transformers/docs/source/it/installation.md", "repo_id": "transformers", "token_count": 3585 }
274
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Quick tour [[open-in-colab]] Entra in azione con 🤗 Transformers! Inizia utilizzando [`pipeline`] per un'inferenza veloce, carica un modello pre-allenato e un tokenizer con una [AutoClass](./model_doc/auto) per risolvere i tuoi compiti legati a testo, immagini o audio. <Tip> Tutti gli esempi di codice presenti in questa documentazione hanno un pulsante in alto a sinistra che permette di selezionare tra PyTorch e TensorFlow. Se questo non è presente, ci si aspetta che il codice funzioni per entrambi i backend senza alcun cambiamento. </Tip> ## Pipeline [`pipeline`] è il modo più semplice per utilizzare un modello pre-allenato per un dato compito. <Youtube id="tiZFewofSLM"/> La [`pipeline`] supporta molti compiti comuni: **Testo**: * Analisi del Sentimento (Sentiment Analysis, in inglese): classifica la polarità di un testo dato. * Generazione del Testo (Text Generation, in inglese): genera del testo a partire da un dato input. * Riconoscimento di Entità (Name Entity Recognition o NER, in inglese): etichetta ogni parola con l'entità che questa rappresenta (persona, data, luogo, ecc.). * Rispondere a Domande (Question answering, in inglese): estrae la risposta da un contesto, dato del contesto e una domanda. * Riempimento di Maschere (Fill-mask, in inglese): riempie gli spazi mancanti in un testo che ha parole mascherate. * Riassumere (Summarization, in inglese): genera una sintesi di una lunga sequenza di testo o di un documento. * Traduzione (Translation, in inglese): traduce un testo in un'altra lingua. * Estrazione di Caratteristiche (Feature Extraction, in inglese): crea un tensore che rappresenta un testo. **Immagini**: * Classificazione di Immagini (Image Classification, in inglese): classifica un'immagine. * Segmentazione di Immagini (Image Segmentation, in inglese): classifica ogni pixel di un'immagine. * Rilevazione di Oggetti (Object Detection, in inglese): rileva oggetti all'interno di un'immagine. **Audio**: * Classificazione di Audio (Audio Classification, in inglese): assegna un'etichetta ad un segmento di audio dato. * Riconoscimento Vocale Automatico (Automatic Speech Recognition o ASR, in inglese): trascrive il contenuto di un audio dato in un testo. <Tip> Per maggiori dettagli legati alla [`pipeline`] e ai compiti ad essa associati, fai riferimento alla documentazione [qui](./main_classes/pipelines). </Tip> ### Utilizzo della Pipeline Nel seguente esempio, utilizzerai la [`pipeline`] per l'analisi del sentimento. 
Installa le seguenti dipendenze se non lo hai già fatto: <frameworkcontent> <pt> ```bash pip install torch ``` </pt> <tf> ```bash pip install tensorflow ``` </tf> </frameworkcontent> Importa [`pipeline`] e specifica il compito che vuoi completare: ```py >>> from transformers import pipeline >>> classificatore = pipeline("sentiment-analysis", model="MilaNLProc/feel-it-italian-sentiment") ``` La pipeline scarica e salva il [modello pre-allenato](https://huggingface.co/MilaNLProc/feel-it-italian-sentiment) e il tokenizer per l'analisi del sentimento. Se non avessimo scelto un modello, la pipeline ne avrebbe scelto uno di default. Ora puoi utilizzare il `classifier` sul tuo testo obiettivo: ```py >>> classificatore("Siamo molto felici di mostrarti la libreria 🤗 Transformers.") [{'label': 'positive', 'score': 0.9997}] ``` Per più di una frase, passa una lista di frasi alla [`pipeline`] la quale restituirà una lista di dizionari: ```py >>> risultati = classificatore( ... ["Siamo molto felici di mostrarti la libreria 🤗 Transformers.", "Speriamo te non la odierai."] ... ) >>> for risultato in risultati: ... print(f"etichetta: {risultato['label']}, con punteggio: {round(risultato['score'], 4)}") etichetta: positive, con punteggio: 0.9998 etichetta: negative, con punteggio: 0.9998 ``` La [`pipeline`] può anche iterare su un dataset intero. Inizia installando la libreria [🤗 Datasets](https://huggingface.co/docs/datasets/): ```bash pip install datasets ``` Crea una [`pipeline`] con il compito che vuoi risolvere e con il modello che vuoi utilizzare. ```py >>> import torch >>> from transformers import pipeline >>> riconoscitore_vocale = pipeline( ... "automatic-speech-recognition", model="radiogroup-crits/wav2vec2-xls-r-1b-italian-doc4lm-5gram" ... ) ``` Poi, carica un dataset (vedi 🤗 Datasets [Quick Start](https://huggingface.co/docs/datasets/quickstart) per maggiori dettagli) sul quale vuoi iterare. Per esempio, carichiamo il dataset [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14): ```py >>> from datasets import load_dataset, Audio >>> dataset = load_dataset("PolyAI/minds14", name="it-IT", split="train") # doctest: +IGNORE_RESULT ``` Dobbiamo assicurarci che la frequenza di campionamento del set di dati corrisponda alla frequenza di campionamento con cui è stato addestrato `radiogroup-crits/wav2vec2-xls-r-1b-italian-doc4lm-5gram`. ```py >>> dataset = dataset.cast_column("audio", Audio(sampling_rate=riconoscitore_vocale.feature_extractor.sampling_rate)) ``` I file audio vengono caricati automaticamente e ri-campionati quando chiamiamo la colonna "audio". Estraiamo i vettori delle forme d'onda grezze delle prime 4 osservazioni e passiamoli come lista alla pipeline: ```py >>> risultato = riconoscitore_vocale(dataset[:4]["audio"]) >>> print([d["text"] for d in risultato]) ['dovrei caricare dei soldi sul mio conto corrente', 'buongiorno e senza vorrei depositare denaro sul mio conto corrente come devo fare per cortesia', 'sì salve vorrei depositare del denaro sul mio conto', 'e buon pomeriggio vorrei depositare dei soldi sul mio conto bancario volleo sapere come posso fare se e posso farlo online ed un altro conto o andandoo tramite bancomut'] ``` Per un dataset più grande dove gli input sono di dimensione maggiore (come nel parlato/audio o nella visione), dovrai passare un generatore al posto di una lista che carica tutti gli input in memoria. Guarda la [documentazione della pipeline](./main_classes/pipelines) per maggiori informazioni. 
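Per esempio, uno schema possibile (il nome `campioni_audio` e il ciclo sono puramente illustrativi) è definire una funzione generatrice che restituisce i campioni uno alla volta, così la [`pipeline`] li elabora in streaming senza caricare tutto il dataset in memoria:

```py
>>> def campioni_audio():
...     # restituisce i campioni uno alla volta invece di costruire una lista in memoria
...     for campione in dataset:
...         yield campione["audio"]

>>> for risultato in riconoscitore_vocale(campioni_audio()):
...     print(risultato["text"])
```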
### Utilizzare un altro modello e tokenizer nella pipeline La [`pipeline`] può ospitare qualsiasi modello del [Model Hub](https://huggingface.co/models), rendendo semplice l'adattamento della [`pipeline`] per altri casi d'uso. Per esempio, se si vuole un modello capace di trattare testo in francese, usa i tag presenti nel Model Hub in modo da filtrare per ottenere un modello appropriato. Il miglior risultato filtrato restituisce un modello multi-lingua [BERT model](https://huggingface.co/nlptown/bert-base-multilingual-uncased-sentiment) fine-tuned per l'analisi del sentimento. Ottimo, utilizziamo questo modello! ```py >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" ``` <frameworkcontent> <pt> Usa [`AutoModelForSequenceClassification`] e [`AutoTokenizer`] per caricare il modello pre-allenato e il suo tokenizer associato (maggiori informazioni su una `AutoClass` in seguito): ```py >>> from transformers import AutoTokenizer, AutoModelForSequenceClassification >>> model = AutoModelForSequenceClassification.from_pretrained(model_name) >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` </pt> <tf> Usa [`TFAutoModelForSequenceClassification`] e [`AutoTokenizer`] per caricare il modello pre-allenato e il suo tokenizer associato (maggiori informazioni su una `TFAutoClass` in seguito): ```py >>> from transformers import AutoTokenizer, TFAutoModelForSequenceClassification >>> model = TFAutoModelForSequenceClassification.from_pretrained(model_name) >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` </tf> </frameworkcontent> Poi puoi specificare il modello e il tokenizer nella [`pipeline`], e applicare il `classifier` sul tuo testo obiettivo: ```py >>> classifier = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer) >>> classifier("Nous sommes très heureux de vous présenter la bibliothèque 🤗 Transformers.") [{'label': '5 stars', 'score': 0.7273}] ``` Se non riesci a trovare un modello per il tuo caso d'uso, dovrai fare fine-tuning di un modello pre-allenato sui tuoi dati. Dai un'occhiata al nostro tutorial [fine-tuning tutorial](./training) per imparare come. Infine, dopo che hai completato il fine-tuning del tuo modello pre-allenato, considera per favore di condividerlo (vedi il tutorial [qui](./model_sharing)) con la comunità sul Model Hub per democratizzare l'NLP! 🤗 ## AutoClass <Youtube id="AhChOFRegn4"/> Al suo interno, le classi [`AutoModelForSequenceClassification`] e [`AutoTokenizer`] lavorano assieme per dare potere alla [`pipeline`]. Una [AutoClass](./model_doc/auto) è una scorciatoia che automaticamente recupera l'architettura di un modello pre-allenato a partire dal suo nome o path. Hai solo bisogno di selezionare la `AutoClass` appropriata per il tuo compito e il suo tokenizer associato con [`AutoTokenizer`]. Ritorniamo al nostro esempio e vediamo come puoi utilizzare la `AutoClass` per replicare i risultati della [`pipeline`]. ### AutoTokenizer Un tokenizer è responsabile dell'elaborazione del testo in modo da trasformarlo in un formato comprensibile dal modello. Per prima cosa, il tokenizer dividerà il testo in parole chiamate *token*. Ci sono diverse regole che governano il processo di tokenizzazione, tra cui come dividere una parola e a quale livello (impara di più sulla tokenizzazione [qui](./tokenizer_summary)). 
La cosa più importante da ricordare comunque è che hai bisogno di inizializzare il tokenizer con lo stesso nome del modello in modo da assicurarti che stai utilizzando le stesse regole di tokenizzazione con cui il modello è stato pre-allenato. Carica un tokenizer con [`AutoTokenizer`]: ```py >>> from transformers import AutoTokenizer >>> nome_del_modello = "nlptown/bert-base-multilingual-uncased-sentiment" >>> tokenizer = AutoTokenizer.from_pretrained(nome_del_modello) ``` Dopodiché, il tokenizer converte i token in numeri in modo da costruire un tensore come input del modello. Questo è conosciuto come il *vocabolario* del modello. Passa il tuo testo al tokenizer: ```py >>> encoding = tokenizer("Siamo molto felici di mostrarti la libreria 🤗 Transformers.") >>> print(encoding) {'input_ids': [101, 56821, 10132, 14407, 13019, 13007, 10120, 47201, 10330, 10106, 91686, 100, 58263, 119, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} ``` Il tokenizer restituirà un dizionario contenente: * [input_ids](./glossary#input-ids): rappresentazioni numeriche dei tuoi token. * [attention_mask](.glossary#attention-mask): indica quali token devono essere presi in considerazione. Come con la [`pipeline`], il tokenizer accetterà una lista di input. In più, il tokenizer può anche completare (pad, in inglese) e troncare il testo in modo da restituire un lotto (batch, in inglese) di lunghezza uniforme: <frameworkcontent> <pt> ```py >>> pt_batch = tokenizer( ... ["Siamo molto felici di mostrarti la libreria 🤗 Transformers.", "Speriamo te non la odierai."], ... padding=True, ... truncation=True, ... max_length=512, ... return_tensors="pt", ... ) ``` </pt> <tf> ```py >>> tf_batch = tokenizer( ... ["Siamo molto felici di mostrarti la libreria 🤗 Transformers.", "Speriamo te non la odierai."], ... padding=True, ... truncation=True, ... max_length=512, ... return_tensors="tf", ... ) ``` </tf> </frameworkcontent> Leggi il tutorial sul [preprocessing](./preprocessing) per maggiori dettagli sulla tokenizzazione. ### AutoModel <frameworkcontent> <pt> 🤗 Transformers fornisce un metodo semplice e unificato per caricare istanze pre-allenate. Questo significa che puoi caricare un [`AutoModel`] come caricheresti un [`AutoTokenizer`]. L'unica differenza è selezionare l'[`AutoModel`] corretto per il compito di interesse. Dato che stai facendo classificazione di testi, o sequenze, carica [`AutoModelForSequenceClassification`]: ```py >>> from transformers import AutoModelForSequenceClassification >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> pt_model = AutoModelForSequenceClassification.from_pretrained(model_name) ``` <Tip> Guarda il [task summary](./task_summary) per sapere quale classe di [`AutoModel`] utilizzare per quale compito. </Tip> Ora puoi passare il tuo lotto di input pre-processati direttamente al modello. Devi solo spacchettare il dizionario aggiungendo `**`: ```py >>> pt_outputs = pt_model(**pt_batch) ``` Il modello produrrà le attivazioni finali nell'attributo `logits`. Applica la funzione softmax a `logits` per ottenere le probabilità: ```py >>> from torch import nn >>> pt_predictions = nn.functional.softmax(pt_outputs.logits, dim=-1) >>> print(pt_predictions) tensor([[0.0041, 0.0037, 0.0203, 0.2005, 0.7713], [0.3766, 0.3292, 0.1832, 0.0558, 0.0552]], grad_fn=<SoftmaxBackward0>) ``` </pt> <tf> 🤗 Transformers fornisce un metodo semplice e unificato per caricare istanze pre-allenate. 
Questo significa che puoi caricare un [`TFAutoModel`] come caricheresti un [`AutoTokenizer`]. L'unica differenza è selezionare il [`TFAutoModel`] corretto per il compito di interesse. Dato che stai facendo classificazione di testi, o sequenze, carica [`TFAutoModelForSequenceClassification`]: ```py >>> from transformers import TFAutoModelForSequenceClassification >>> nome_del_modello = "nlptown/bert-base-multilingual-uncased-sentiment" >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(nome_del_modello) ``` <Tip> Guarda il [task summary](./task_summary) per sapere quale classe di [`AutoModel`] utilizzare per quale compito. </Tip> Ora puoi passare il tuo lotto di input pre-processati direttamente al modello passando le chiavi del dizionario al tensore: ```py >>> tf_outputs = tf_model(tf_batch) ``` Il modello produrrà le attivazioni finali nell'attributo `logits`. Applica la funzione softmax a `logits` per ottenere le probabilità: ```py >>> import tensorflow as tf >>> tf_predictions = tf.nn.softmax(tf_outputs.logits, axis=-1) >>> tf_predictions # doctest: +IGNORE_RESULT ``` </tf> </frameworkcontent> <Tip> Tutti i modelli di 🤗 Transformers (PyTorch e TensorFlow) restituiscono i tensori *prima* della funzione finale di attivazione (come la softmax) perché la funzione di attivazione finale viene spesso unita a quella di perdita. </Tip> I modelli sono [`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) o [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) standard così puoi utilizzarli all'interno del tuo training loop usuale. Tuttavia, per rendere le cose più semplici, 🤗 Transformers fornisce una classe [`Trainer`] per PyTorch che aggiunge delle funzionalità per l'allenamento distribuito, precisione mista, e altro ancora. Per TensorFlow, puoi utilizzare il metodo `fit` di [Keras](https://keras.io/). Fai riferimento al [tutorial per il training](./training) per maggiori dettagli. <Tip> Gli output del modello di 🤗 Transformers sono delle dataclasses speciali in modo che i loro attributi vengano auto-completati all'interno di un IDE. Gli output del modello si comportano anche come una tupla o un dizionario (ad esempio, puoi indicizzare con un intero, una slice o una stringa) nel qual caso gli attributi che sono `None` vengono ignorati. 
</Tip> ### Salva un modello <frameworkcontent> <pt> Una volta completato il fine-tuning del tuo modello, puoi salvarlo con il suo tokenizer utilizzando [`PreTrainedModel.save_pretrained`]: ```py >>> pt_save_directory = "./pt_save_pretrained" >>> tokenizer.save_pretrained(pt_save_directory) # doctest: +IGNORE_RESULT >>> pt_model.save_pretrained(pt_save_directory) ``` Quando desideri utilizzare il tuo modello nuovamente, puoi ri-caricarlo con [`PreTrainedModel.from_pretrained`]: ```py >>> pt_model = AutoModelForSequenceClassification.from_pretrained("./pt_save_pretrained") ``` </pt> <tf> Una volta completato il fine-tuning del tuo modello, puoi salvarlo con il suo tokenizer utilizzando [`TFPreTrainedModel.save_pretrained`]: ```py >>> tf_save_directory = "./tf_save_pretrained" >>> tokenizer.save_pretrained(tf_save_directory) # doctest: +IGNORE_RESULT >>> tf_model.save_pretrained(tf_save_directory) ``` Quando desideri utilizzare il tuo modello nuovamente, puoi ri-caricarlo con [`TFPreTrainedModel.from_pretrained`]: ```py >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained("./tf_save_pretrained") ``` </tf> </frameworkcontent> Una caratteristica particolarmente interessante di 🤗 Transformers è la sua abilità di salvare un modello e ri-caricarlo sia come modello di PyTorch che di TensorFlow. I parametri `from_pt` o `from_tf` possono convertire un modello da un framework all'altro: <frameworkcontent> <pt> ```py >>> from transformers import AutoModel >>> tokenizer = AutoTokenizer.from_pretrained(tf_save_directory) >>> pt_model = AutoModelForSequenceClassification.from_pretrained(tf_save_directory, from_tf=True) ``` </pt> <tf> ```py >>> from transformers import TFAutoModel >>> tokenizer = AutoTokenizer.from_pretrained(pt_save_directory) >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(pt_save_directory, from_pt=True) ``` </tf> </frameworkcontent>
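Infine, un suggerimento (puramente indicativo): le cartelle salvate con `save_pretrained` possono essere riutilizzate anche direttamente in una [`pipeline`], passando il percorso locale come modello e tokenizer (qui si usa la cartella PyTorch creata sopra; con quella TensorFlow il procedimento è analogo):

```py
>>> from transformers import pipeline

>>> # La pipeline carica modello e tokenizer dalla cartella locale salvata in precedenza.
>>> classificatore = pipeline("sentiment-analysis", model="./pt_save_pretrained", tokenizer="./pt_save_pretrained")
>>> classificatore("Siamo molto felici di mostrarti la libreria 🤗 Transformers.")  # doctest: +SKIP
```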
transformers/docs/source/it/quicktour.md/0
{ "file_path": "transformers/docs/source/it/quicktour.md", "repo_id": "transformers", "token_count": 6490 }
275
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Sharing custom models 🤗 Transformersライブラリは、簡単に拡張できるように設計されています。すべてのモデルはリポジトリの特定のサブフォルダに完全にコード化されており、抽象化はありません。したがって、モデリングファイルをコピーして調整することが簡単です。 新しいモデルを書いている場合、ゼロから始める方が簡単かもしれません。このチュートリアルでは、カスタムモデルとその設定をどのように書き、Transformers内で使用できるようにし、コードに依存する共同体と共有する方法を説明します。ライブラリに存在しない場合でも、誰でも使用できるようにします。 これを実証するために、[timmライブラリ](https://github.com/rwightman/pytorch-image-models)のResNetクラスを[`PreTrainedModel`]にラップすることによって、ResNetモデルを使用します。 ## Writing a custom configuration モデルに取り組む前に、まずその設定を書きましょう。モデルの設定は、モデルを構築するために必要なすべての情報を含むオブジェクトです。次のセクションで見るように、モデルは初期化するために`config`しか受け取ることができないため、そのオブジェクトができるだけ完全である必要があります。 この例では、ResNetクラスのいくつかの引数を取得し、調整したいかもしれないとします。異なる設定は、異なるタイプのResNetを提供します。その後、これらの引数を確認した後、それらの引数を単に格納します。 ```python from transformers import PretrainedConfig from typing import List class ResnetConfig(PretrainedConfig): model_type = "resnet" def __init__( self, block_type="bottleneck", layers: List[int] = [3, 4, 6, 3], num_classes: int = 1000, input_channels: int = 3, cardinality: int = 1, base_width: int = 64, stem_width: int = 64, stem_type: str = "", avg_down: bool = False, **kwargs, ): if block_type not in ["basic", "bottleneck"]: raise ValueError(f"`block_type` must be 'basic' or bottleneck', got {block_type}.") if stem_type not in ["", "deep", "deep-tiered"]: raise ValueError(f"`stem_type` must be '', 'deep' or 'deep-tiered', got {stem_type}.") self.block_type = block_type self.layers = layers self.num_classes = num_classes self.input_channels = input_channels self.cardinality = cardinality self.base_width = base_width self.stem_width = stem_width self.stem_type = stem_type self.avg_down = avg_down super().__init__(**kwargs) ``` 重要なことを3つ覚えておくべきポイントは次のとおりです: - `PretrainedConfig` を継承する必要があります。 - あなたの `PretrainedConfig` の `__init__` は任意の kwargs を受け入れる必要があります。 - これらの `kwargs` は親クラスの `__init__` に渡す必要があります。 継承は、🤗 Transformers ライブラリのすべての機能を取得できるようにするためです。他の2つの制約は、 `PretrainedConfig` が設定しているフィールド以外にも多くのフィールドを持っていることから来ています。 `from_pretrained` メソッドで設定を再ロードする場合、これらのフィールドはあなたの設定に受け入れられ、 その後、親クラスに送信される必要があります。 設定の `model_type` を定義すること(ここでは `model_type="resnet"`)は、 自動クラスにモデルを登録したい場合を除いては必須ではありません(最後のセクションを参照)。 これで、ライブラリの他のモデル設定と同様に、設定を簡単に作成して保存できます。 以下は、resnet50d 設定を作成して保存する方法の例です: ```py resnet50d_config = ResnetConfig(block_type="bottleneck", stem_width=32, stem_type="deep", avg_down=True) resnet50d_config.save_pretrained("custom-resnet") ``` これにより、`custom-resnet` フォルダ内に `config.json` という名前のファイルが保存されます。その後、`from_pretrained` メソッドを使用して構成を再ロードできます。 ```py resnet50d_config = ResnetConfig.from_pretrained("custom-resnet") ``` また、[`PretrainedConfig`] クラスの他のメソッドを使用することもできます。たとえば、[`~PretrainedConfig.push_to_hub`] を使用して、設定を直接 Hub にアップロードできます。 ## Writing a custom model ResNet の設定ができたので、モデルを書き始めることができます。実際には2つのモデルを書きます。1つはバッチの画像から隠れた特徴を抽出するモデル([`BertModel`] 
のようなもの)で、もう1つは画像分類に適したモデル([`BertForSequenceClassification`] のようなもの)です。 前述したように、この例をシンプルに保つために、モデルの緩いラッパーのみを書きます。このクラスを書く前に行う必要がある唯一のことは、ブロックタイプと実際のブロッククラスの間のマップです。その後、すべてを `ResNet` クラスに渡して設定からモデルを定義します: ```py from transformers import PreTrainedModel from timm.models.resnet import BasicBlock, Bottleneck, ResNet from .configuration_resnet import ResnetConfig BLOCK_MAPPING = {"basic": BasicBlock, "bottleneck": Bottleneck} class ResnetModel(PreTrainedModel): config_class = ResnetConfig def __init__(self, config): super().__init__(config) block_layer = BLOCK_MAPPING[config.block_type] self.model = ResNet( block_layer, config.layers, num_classes=config.num_classes, in_chans=config.input_channels, cardinality=config.cardinality, base_width=config.base_width, stem_width=config.stem_width, stem_type=config.stem_type, avg_down=config.avg_down, ) def forward(self, tensor): return self.model.forward_features(tensor) ``` 画像を分類するモデルの場合、forwardメソッドを変更するだけです: ```py import torch class ResnetModelForImageClassification(PreTrainedModel): config_class = ResnetConfig def __init__(self, config): super().__init__(config) block_layer = BLOCK_MAPPING[config.block_type] self.model = ResNet( block_layer, config.layers, num_classes=config.num_classes, in_chans=config.input_channels, cardinality=config.cardinality, base_width=config.base_width, stem_width=config.stem_width, stem_type=config.stem_type, avg_down=config.avg_down, ) def forward(self, tensor, labels=None): logits = self.model(tensor) if labels is not None: loss = torch.nn.functional.cross_entropy(logits, labels) return {"loss": loss, "logits": logits} return {"logits": logits} ``` 両方の場合、`PreTrainedModel`から継承し、`config`を使用してスーパークラスの初期化を呼び出します(通常の`torch.nn.Module`を書くときのような感じです)。 `config_class`を設定する行は必須ではありませんが、(最後のセクションを参照)、モデルを自動クラスに登録したい場合に使用できます。 <Tip> モデルがライブラリ内のモデルと非常に似ている場合、このモデルと同じ構成を再利用できます。 </Tip> モデルが返す内容は何でも構いませんが、ラベルが渡されるときに損失を含む辞書を返す(`ResnetModelForImageClassification`のように行ったもの)と、 モデルを[`Trainer`]クラス内で直接使用できるようになります。独自のトレーニングループまたは他のライブラリを使用する予定である限り、 別の出力形式を使用することも問題ありません。 さて、モデルクラスができたので、1つ作成しましょう: ```py resnet50d = ResnetModelForImageClassification(resnet50d_config) ``` 再度、[`PreTrainedModel`]のいずれかのメソッド、例えば[`~PreTrainedModel.save_pretrained`]や [`~PreTrainedModel.push_to_hub`]などを使用できます。次のセクションでは、モデルの重みをコードと一緒に Hugging Face Hub にプッシュする方法を見てみます。 しかし、まずはモデル内に事前学習済みの重みをロードしましょう。 独自のユースケースでは、おそらく独自のデータでカスタムモデルをトレーニングすることになるでしょう。 このチュートリアルではスピードアップのために、resnet50dの事前学習済みバージョンを使用します。 私たちのモデルはそれをラップするだけなので、これらの重みを転送するのは簡単です: ```py import timm pretrained_model = timm.create_model("resnet50d", pretrained=True) resnet50d.model.load_state_dict(pretrained_model.state_dict()) ``` さて、[`~PreTrainedModel.save_pretrained`]または[`~PreTrainedModel.push_to_hub`]を実行したときに、 モデルのコードが保存されるようにする方法を見てみましょう。 ## Sending the code to the Hub <Tip warning={true}> このAPIは実験的であり、次のリリースでわずかな変更があるかもしれません。 </Tip> まず、モデルが`.py`ファイルに完全に定義されていることを確認してください。 ファイルは相対インポートを他のファイルに依存できますが、すべてのファイルが同じディレクトリにある限り(まだこの機能ではサブモジュールはサポートしていません)、問題ありません。 この例では、現在の作業ディレクトリ内に名前が「resnet_model」のフォルダを作成し、その中に`modeling_resnet.py`ファイルと`configuration_resnet.py`ファイルを定義します。 構成ファイルには`ResnetConfig`のコードが含まれ、モデリングファイルには`ResnetModel`と`ResnetModelForImageClassification`のコードが含まれています。 ``` . 
└── resnet_model ├── __init__.py ├── configuration_resnet.py └── modeling_resnet.py ``` `__init__.py`は空であっても問題ありません。Pythonが`resnet_model`をモジュールとして検出できるようにするために存在します。 <Tip warning={true}> ライブラリからモデリングファイルをコピーする場合、ファイルの先頭にあるすべての相対インポートを`transformers`パッケージからインポートに置き換える必要があります。 </Tip> 既存の設定やモデルを再利用(またはサブクラス化)できることに注意してください。 コミュニティとモデルを共有するために、次の手順に従ってください:まず、新しく作成したファイルからResNetモデルと設定をインポートします: ```py from resnet_model.configuration_resnet import ResnetConfig from resnet_model.modeling_resnet import ResnetModel, ResnetModelForImageClassification ``` 次に、`save_pretrained`メソッドを使用してこれらのオブジェクトのコードファイルをコピーし、特定のAutoクラス(特にモデルの場合)に正しく登録するようライブラリに指示する必要があります。次のように実行します: ```py ResnetConfig.register_for_auto_class() ResnetModel.register_for_auto_class("AutoModel") ResnetModelForImageClassification.register_for_auto_class("AutoModelForImageClassification") ``` 注意: 設定については自動クラスを指定する必要はありません(設定用の自動クラスは1つしかなく、[`AutoConfig`]です)が、 モデルについては異なります。カスタムモデルは多くの異なるタスクに適している可能性があるため、 モデルが正確な自動クラスのうちどれに適しているかを指定する必要があります。 次に、前述のように設定とモデルを作成しましょう: ```py resnet50d_config = ResnetConfig(block_type="bottleneck", stem_width=32, stem_type="deep", avg_down=True) resnet50d = ResnetModelForImageClassification(resnet50d_config) pretrained_model = timm.create_model("resnet50d", pretrained=True) resnet50d.model.load_state_dict(pretrained_model.state_dict()) ``` モデルをHubに送信するには、ログインしていることを確認してください。ターミナルで次のコマンドを実行します: ```bash huggingface-cli login ``` またはノートブックから: ```py from huggingface_hub import notebook_login notebook_login() ``` 次に、次のようにして、独自の名前空間にプッシュできます(または、メンバーである組織にプッシュできます): ```py resnet50d.push_to_hub("custom-resnet50d") ``` モデリングの重みとJSON形式の構成に加えて、このフォルダー「custom-resnet50d」内のモデリングおよび構成「.py」ファイルもコピーされ、結果はHubにアップロードされました。結果はこの[model repo](https://huggingface.co/sgugger/custom-resnet50d)で確認できます。 詳細については、[Hubへのプッシュ方法](model_sharing)を参照してください。 ## Using a model with custom code 自動クラスと `from_pretrained` メソッドを使用して、リポジトリ内のカスタムコードファイルと共に任意の構成、モデル、またはトークナイザを使用できます。 Hubにアップロードされるすべてのファイルとコードはマルウェアのスキャンが実施されます(詳細は[Hubセキュリティ](https://huggingface.co/docs/hub/security#malware-scanning)ドキュメンテーションを参照してください)、しかし、依然として悪意のあるコードを実行しないために、モデルコードと作者を確認する必要があります。 `trust_remote_code=True` を設定してカスタムコードを持つモデルを使用できます: ```py from transformers import AutoModelForImageClassification model = AutoModelForImageClassification.from_pretrained("sgugger/custom-resnet50d", trust_remote_code=True) ``` コミットハッシュを「revision」として渡すことも強く推奨されています。これにより、モデルの作者がコードを悪意のある新しい行で更新しなかったことを確認できます(モデルの作者を完全に信頼している場合を除きます)。 ```py commit_hash = "ed94a7c6247d8aedce4647f00f20de6875b5b292" model = AutoModelForImageClassification.from_pretrained( "sgugger/custom-resnet50d", trust_remote_code=True, revision=commit_hash ) ``` モデルリポジトリのコミット履歴をブラウジングする際には、任意のコミットのコミットハッシュを簡単にコピーできるボタンがあります。 ## Registering a model with custom code to the auto classes 🤗 Transformersを拡張するライブラリを作成している場合、独自のモデルを含めるために自動クラスを拡張したい場合があります。 これはコードをHubにプッシュすることとは異なり、ユーザーはカスタムモデルを取得するためにあなたのライブラリをインポートする必要があります (Hubからモデルコードを自動的にダウンロードするのとは対照的です)。 構成に既存のモデルタイプと異なる `model_type` 属性がある限り、またあなたのモデルクラスが適切な `config_class` 属性を持っている限り、 次のようにそれらを自動クラスに追加できます: ```py from transformers import AutoConfig, AutoModel, AutoModelForImageClassification AutoConfig.register("resnet", ResnetConfig) AutoModel.register(ResnetConfig, ResnetModel) AutoModelForImageClassification.register(ResnetConfig, ResnetModelForImageClassification) ``` 注意: `AutoConfig` にカスタム設定を登録する際の最初の引数は、カスタム設定の `model_type` と一致する必要があります。 また、任意の自動モデルクラスにカスタムモデルを登録する際の最初の引数は、それらのモデルの `config_class` と一致する必要があります。
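登録が済んでいれば、（あくまで動作イメージを示すための簡単な例ですが）カスタム設定を作成し、自動クラス経由でモデルをインスタンス化できます:

```py
from transformers import AutoModelForImageClassification

# The auto classes now resolve ResnetConfig to the registered custom model class.
resnet50d_config = ResnetConfig(block_type="bottleneck", stem_width=32, stem_type="deep", avg_down=True)
resnet50d = AutoModelForImageClassification.from_config(resnet50d_config)
```

同様に、`save_pretrained` で保存したフォルダに対して `AutoModelForImageClassification.from_pretrained` を呼び出すことも、登録後は通常のモデルと同じように機能します。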
transformers/docs/source/ja/custom_models.md/0
{ "file_path": "transformers/docs/source/ja/custom_models.md", "repo_id": "transformers", "token_count": 7501 }
276
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Processors Transformers ライブラリでは、プロセッサは 2 つの異なる意味を持ちます。 - [Wav2Vec2](../model_doc/wav2vec2) などのマルチモーダル モデルの入力を前処理するオブジェクト (音声とテキスト) または [CLIP](../model_doc/clip) (テキストとビジョン) - 古いバージョンのライブラリで GLUE または SQUAD のデータを前処理するために使用されていたオブジェクトは非推奨になりました。 ## Multi-modal processors マルチモーダル モデルでは、オブジェクトが複数のモダリティ (テキスト、 視覚と音声)。これは、2 つ以上の処理オブジェクトをグループ化するプロセッサーと呼ばれるオブジェクトによって処理されます。 トークナイザー (テキスト モダリティ用)、画像プロセッサー (視覚用)、特徴抽出器 (オーディオ用) など。 これらのプロセッサは、保存およびロード機能を実装する次の基本クラスを継承します。 [[autodoc]] ProcessorMixin ## Deprecated processors すべてのプロセッサは、同じアーキテクチャに従っています。 [`~data.processors.utils.DataProcessor`]。プロセッサは次のリストを返します。 [`~data.processors.utils.InputExample`]。これら [`~data.processors.utils.InputExample`] は次のように変換できます。 [`~data.processors.utils.Input features`] をモデルにフィードします。 [[autodoc]] data.processors.utils.DataProcessor [[autodoc]] data.processors.utils.InputExample [[autodoc]] data.processors.utils.InputFeatures ## GLUE [一般言語理解評価 (GLUE)](https://gluebenchmark.com/) は、 既存の NLU タスクの多様なセットにわたるモデルのパフォーマンス。紙と同時発売された [GLUE: A 自然言語理解のためのマルチタスクベンチマークおよび分析プラットフォーム](https://openreview.net/pdf?id=rJ4km2R5t7) このライブラリは、MRPC、MNLI、MNLI (不一致)、CoLA、SST2、STSB、 QQP、QNLI、RTE、WNLI。 それらのプロセッサは次のとおりです。 - [`~data.processors.utils.MrpcProcessor`] - [`~data.processors.utils.MnliProcessor`] - [`~data.processors.utils.MnliMismatchedProcessor`] - [`~data.processors.utils.Sst2Processor`] - [`~data.processors.utils.StsbProcessor`] - [`~data.processors.utils.QqpProcessor`] - [`~data.processors.utils.QnliProcessor`] - [`~data.processors.utils.RteProcessor`] - [`~data.processors.utils.WnliProcessor`] さらに、次のメソッドを使用して、データ ファイルから値をロードし、それらをリストに変換することができます。 [`~data.processors.utils.InputExample`]。 [[autodoc]] data.processors.glue.glue_convert_examples_to_features ## XNLI [クロスリンガル NLI コーパス (XNLI)](https://www.nyu.edu/projects/bowman/xnli/) は、 言語を超えたテキスト表現の品質。 XNLI は、[*MultiNLI*](http://www.nyu.edu/projects/bowman/multinli/) に基づくクラウドソースのデータセットです。テキストのペアには、15 個のテキスト含意アノテーションがラベル付けされています。 さまざまな言語 (英語などの高リソース言語とスワヒリ語などの低リソース言語の両方を含む)。 論文 [XNLI: Evaluating Cross-lingual Sentence Representations](https://arxiv.org/abs/1809.05053) と同時にリリースされました。 このライブラリは、XNLI データをロードするプロセッサをホストします。 - [`~data.processors.utils.XnliProcessor`] テストセットにはゴールドラベルが付いているため、評価はテストセットで行われますのでご了承ください。 これらのプロセッサを使用する例は、[run_xnli.py](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-classification/run_xnli.py) スクリプトに示されています。 ## SQuAD [The Stanford Question Answering Dataset (SQuAD)](https://rajpurkar.github.io/SQuAD-explorer//) は、次のベンチマークです。 質問応答に関するモデルのパフォーマンスを評価します。 v1.1 と v2.0 の 2 つのバージョンが利用可能です。最初のバージョン (v1.1) は、論文 [SQuAD: 100,000+ question for Machine Comprehension of Text](https://arxiv.org/abs/1606.05250) とともにリリースされました。 2 番目のバージョン (v2.0) は、論文 [Know What You Don't と同時にリリースされました。 知っておくべき: SQuAD 
の答えられない質問](https://arxiv.org/abs/1806.03822)。 このライブラリは、次の 2 つのバージョンのそれぞれのプロセッサをホストします。 ### Processors それらのプロセッサは次のとおりです。 - [`~data.processors.utils.SquadV1Processor`] - [`~data.processors.utils.SquadV2Processor`] どちらも抽象クラス [`~data.processors.utils.SquadProcessor`] を継承しています。 [[autodoc]] data.processors.squad.SquadProcessor - all さらに、次のメソッドを使用して、SQuAD の例を次の形式に変換できます。 モデルの入力として使用できる [`~data.processors.utils.SquadFeatures`]。 [[autodoc]] data.processors.squad.squad_convert_examples_to_features これらのプロセッサと前述の方法は、データを含むファイルだけでなく、 *tensorflow_datasets* パッケージ。以下に例を示します。 ### Example usage 以下にプロセッサを使用した例と、データ ファイルを使用した変換方法を示します。 ```python # Loading a V2 processor processor = SquadV2Processor() examples = processor.get_dev_examples(squad_v2_data_dir) # Loading a V1 processor processor = SquadV1Processor() examples = processor.get_dev_examples(squad_v1_data_dir) features = squad_convert_examples_to_features( examples=examples, tokenizer=tokenizer, max_seq_length=max_seq_length, doc_stride=args.doc_stride, max_query_length=max_query_length, is_training=not evaluate, ) ``` *tensorflow_datasets* の使用は、データ ファイルを使用するのと同じくらい簡単です。 ```python # tensorflow_datasets only handle Squad V1. tfds_examples = tfds.load("squad") examples = SquadV1Processor().get_examples_from_dataset(tfds_examples, evaluate=evaluate) features = squad_convert_examples_to_features( examples=examples, tokenizer=tokenizer, max_seq_length=max_seq_length, doc_stride=args.doc_stride, max_query_length=max_query_length, is_training=not evaluate, ) ``` これらのプロセッサを使用する別の例は、[run_squad.py](https://github.com/huggingface/transformers/tree/main/examples/legacy/question-answering/run_squad.py) スクリプトに示されています。
transformers/docs/source/ja/main_classes/processors.md/0
{ "file_path": "transformers/docs/source/ja/main_classes/processors.md", "repo_id": "transformers", "token_count": 3103 }
277
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # BertGeneration ## Overview BertGeneration モデルは、次を使用してシーケンス間のタスクに利用できる BERT モデルです。 [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) で提案されている [`EncoderDecoderModel`] タスク、Sascha Rothe、Sishi Nagayan、Aliaksei Severyn 著。 論文の要約は次のとおりです。 *大規模なニューラル モデルの教師なし事前トレーニングは、最近、自然言語処理に革命をもたらしました。による NLP 実践者は、公開されたチェックポイントからウォームスタートして、複数の項目で最先端の技術を推進してきました。 コンピューティング時間を大幅に節約しながらベンチマークを実行します。これまでのところ、主に自然言語に焦点を当ててきました。 タスクを理解する。この論文では、シーケンス生成のための事前トレーニングされたチェックポイントの有効性を実証します。私たちは 公開されている事前トレーニング済み BERT と互換性のある Transformer ベースのシーケンス間モデルを開発しました。 GPT-2 および RoBERTa チェックポイントを使用し、モデルの初期化の有用性について広範な実証研究を実施しました。 エンコーダとデコーダ、これらのチェックポイント。私たちのモデルは、機械翻訳に関する新しい最先端の結果をもたらします。 テキストの要約、文の分割、および文の融合。* ## Usage examples and tips - モデルを [`EncoderDecoderModel`] と組み合わせて使用​​して、2 つの事前トレーニングされたモデルを活用できます。 後続の微調整のための BERT チェックポイント。 ```python >>> # leverage checkpoints for Bert2Bert model... >>> # use BERT's cls token as BOS token and sep token as EOS token >>> encoder = BertGenerationEncoder.from_pretrained("google-bert/bert-large-uncased", bos_token_id=101, eos_token_id=102) >>> # add cross attention layers and use BERT's cls token as BOS token and sep token as EOS token >>> decoder = BertGenerationDecoder.from_pretrained( ... "google-bert/bert-large-uncased", add_cross_attention=True, is_decoder=True, bos_token_id=101, eos_token_id=102 ... ) >>> bert2bert = EncoderDecoderModel(encoder=encoder, decoder=decoder) >>> # create tokenizer... >>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-large-uncased") >>> input_ids = tokenizer( ... "This is a long article to summarize", add_special_tokens=False, return_tensors="pt" ... ).input_ids >>> labels = tokenizer("This is a short summary", return_tensors="pt").input_ids >>> # train... >>> loss = bert2bert(input_ids=input_ids, decoder_input_ids=labels, labels=labels).loss >>> loss.backward() ``` - 事前トレーニングされた [`EncoderDecoderModel`] もモデル ハブで直接利用できます。 ```python >>> # instantiate sentence fusion model >>> sentence_fuser = EncoderDecoderModel.from_pretrained("google/roberta2roberta_L-24_discofuse") >>> tokenizer = AutoTokenizer.from_pretrained("google/roberta2roberta_L-24_discofuse") >>> input_ids = tokenizer( ... "This is the first sentence. This is the second sentence.", add_special_tokens=False, return_tensors="pt" ... 
).input_ids
>>> outputs = sentence_fuser.generate(input_ids)
>>> print(tokenizer.decode(outputs[0]))
```

ヒント:

- [`BertGenerationEncoder`] と [`BertGenerationDecoder`] は、[`EncoderDecoder`] と組み合わせて使用します。
- 要約、文の分割、文の融合、および翻訳では、入力に特別なトークンは必要ありません。したがって、入力の末尾に EOS トークンを追加しないでください。

微調整後のモデルで生成を行う簡単なスケッチは、このページ末尾に示します。

このモデルは、[patrickvonplaten](https://huggingface.co/patrickvonplaten) によって提供されました。元のコードは [こちら](https://tfhub.dev/s?module-type=text-generation&subtype=module,placeholder) にあります。

## BertGenerationConfig

[[autodoc]] BertGenerationConfig

## BertGenerationTokenizer

[[autodoc]] BertGenerationTokenizer
    - save_vocabulary

## BertGenerationEncoder

[[autodoc]] BertGenerationEncoder
    - forward

## BertGenerationDecoder

[[autodoc]] BertGenerationDecoder
    - forward
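参考までに、上で微調整した `bert2bert` を使って生成を行う最小スケッチを示します。`decoder_start_token_id` と `pad_token_id` の設定値はこの例のための仮定であり、使用するトークナイザーとチェックポイントに合わせて調整してください。

```python
>>> # minimal sketch (the id settings below are assumptions for this example)
>>> bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
>>> bert2bert.config.pad_token_id = tokenizer.pad_token_id

>>> # summarize the article used in the training example above
>>> generated = bert2bert.generate(input_ids, max_new_tokens=32)
>>> print(tokenizer.batch_decode(generated, skip_special_tokens=True))
```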
transformers/docs/source/ja/model_doc/bert-generation.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/bert-generation.md", "repo_id": "transformers", "token_count": 1974 }
278
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # ByT5 ## Overview ByT5 モデルは、[ByT5: Towards a token-free future with pre-trained byte-to-byte models](https://arxiv.org/abs/2105.13626) by Linting Xue, Aditya Barua, Noah Constant, Rami Al-Rfou, Sharan Narang, Mihir Kale, Adam Roberts, Colin Raffel. 論文の要約は次のとおりです。 *最も広く使用されている事前トレーニング済み言語モデルは、単語またはサブワード単位に対応するトークンのシーケンスで動作します。 テキストをトークンのシーケンスとしてエンコードするには、トークナイザーが必要です。トークナイザーは通常、 モデル。代わりに生のテキスト (バイトまたは文字) を直接操作するトークンフリー モデルには多くの利点があります。 すぐに使用できるあらゆる言語のテキストを処理でき、ノイズに対してより堅牢であり、技術的負債を最小限に抑えます。 複雑でエラーが発生しやすいテキスト前処理パイプラインを削除します。バイトまたは文字列がトークンより長いため トークンフリー モデルに関する過去の研究では、シーケンスのコストを償却するように設計された新しいモデル アーキテクチャが導入されることがよくありました。 生のテキストを直接操作します。この論文では、標準的な Transformer アーキテクチャが次のようなもので使用できることを示します。 バイトシーケンスを処理するための最小限の変更。パラメータ数の観点からトレードオフを注意深く特徴付けます。 FLOP のトレーニングと推論速度を調べ、バイトレベルのモデルがトークンレベルと競合できることを示します。 対応者。また、バイトレベルのモデルはノイズに対して大幅に堅牢であり、より優れたパフォーマンスを発揮することも示しています。 スペルと発音に敏感なタスク。私たちの貢献の一環として、新しいセットをリリースします。 T5 アーキテクチャに基づいた事前トレーニング済みのバイトレベルの Transformer モデルと、そこで使用されるすべてのコードとデータ 実験。* このモデルは、[patrickvonplaten](https://huggingface.co/patrickvonplaten) によって提供されました。元のコードは次のとおりです [ここ](https://github.com/google-research/byt5) にあります。 <Tip> ByT5 のアーキテクチャは T5v1.1 モデルに基づいています。API リファレンスについては、[T5v1.1 のドキュメント ページ](t5v1.1) を参照してください。彼らは モデルの入力を準備する方法が異なるだけです。以下のコード例を参照してください。 </Tip> ByT5 は教師なしで事前トレーニングされているため、単一タスク中にタスク プレフィックスを使用する利点はありません。 微調整。マルチタスクの微調整を行う場合は、プレフィックスを使用する必要があります。 ## Usage Examples ByT5 は生の UTF-8 バイトで動作するため、トークナイザーなしで使用できます。 ```python >>> from transformers import T5ForConditionalGeneration >>> import torch >>> model = T5ForConditionalGeneration.from_pretrained("google/byt5-small") >>> num_special_tokens = 3 >>> # Model has 3 special tokens which take up the input ids 0,1,2 of ByT5. >>> # => Need to shift utf-8 character encodings by 3 before passing ids to model. >>> input_ids = torch.tensor([list("Life is like a box of chocolates.".encode("utf-8"))]) + num_special_tokens >>> labels = torch.tensor([list("La vie est comme une boîte de chocolat.".encode("utf-8"))]) + num_special_tokens >>> loss = model(input_ids, labels=labels).loss >>> loss.item() 2.66 ``` ただし、バッチ推論とトレーニングの場合は、トークナイザーを使用することをお勧めします。 ```python >>> from transformers import T5ForConditionalGeneration, AutoTokenizer >>> model = T5ForConditionalGeneration.from_pretrained("google/byt5-small") >>> tokenizer = AutoTokenizer.from_pretrained("google/byt5-small") >>> model_inputs = tokenizer( ... ["Life is like a box of chocolates.", "Today is Monday."], padding="longest", return_tensors="pt" ... ) >>> labels_dict = tokenizer( ... ["La vie est comme une boîte de chocolat.", "Aujourd'hui c'est lundi."], padding="longest", return_tensors="pt" ... 
) >>> labels = labels_dict.input_ids >>> loss = model(**model_inputs, labels=labels).loss >>> loss.item() 17.9 ``` [T5](t5) と同様に、ByT5 はスパンマスクノイズ除去タスクでトレーニングされました。しかし、 モデルはキャラクターに直接作用するため、事前トレーニングタスクは少し複雑です 違う。のいくつかの文字を破損してみましょう `"The dog chases a ball in the park."`という文を入力し、ByT5 に予測してもらいます。 わたしたちのため。 ```python >>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("google/byt5-base") >>> model = AutoModelForSeq2SeqLM.from_pretrained("google/byt5-base") >>> input_ids_prompt = "The dog chases a ball in the park." >>> input_ids = tokenizer(input_ids_prompt).input_ids >>> # Note that we cannot add "{extra_id_...}" to the string directly >>> # as the Byte tokenizer would incorrectly merge the tokens >>> # For ByT5, we need to work directly on the character level >>> # Contrary to T5, ByT5 does not use sentinel tokens for masking, but instead >>> # uses final utf character ids. >>> # UTF-8 is represented by 8 bits and ByT5 has 3 special tokens. >>> # => There are 2**8+2 = 259 input ids and mask tokens count down from index 258. >>> # => mask to "The dog [258]a ball [257]park." >>> input_ids = torch.tensor([input_ids[:8] + [258] + input_ids[14:21] + [257] + input_ids[28:]]) >>> input_ids tensor([[ 87, 107, 104, 35, 103, 114, 106, 35, 258, 35, 100, 35, 101, 100, 111, 111, 257, 35, 115, 100, 117, 110, 49, 1]]) >>> # ByT5 produces only one char at a time so we need to produce many more output characters here -> set `max_length=100`. >>> output_ids = model.generate(input_ids, max_length=100)[0].tolist() >>> output_ids [0, 258, 108, 118, 35, 119, 107, 104, 35, 114, 113, 104, 35, 122, 107, 114, 35, 103, 114, 104, 118, 257, 35, 108, 113, 35, 119, 107, 104, 35, 103, 108, 118, 102, 114, 256, 108, 113, 35, 119, 107, 104, 35, 115, 100, 117, 110, 49, 35, 87, 107, 104, 35, 103, 114, 106, 35, 108, 118, 35, 119, 107, 104, 35, 114, 113, 104, 35, 122, 107, 114, 35, 103, 114, 104, 118, 35, 100, 35, 101, 100, 111, 111, 35, 108, 113, 255, 35, 108, 113, 35, 119, 107, 104, 35, 115, 100, 117, 110, 49] >>> # ^- Note how 258 descends to 257, 256, 255 >>> # Now we need to split on the sentinel tokens, let's write a short loop for this >>> output_ids_list = [] >>> start_token = 0 >>> sentinel_token = 258 >>> while sentinel_token in output_ids: ... split_idx = output_ids.index(sentinel_token) ... output_ids_list.append(output_ids[start_token:split_idx]) ... start_token = split_idx ... sentinel_token -= 1 >>> output_ids_list.append(output_ids[start_token:]) >>> output_string = tokenizer.batch_decode(output_ids_list) >>> output_string ['<pad>', 'is the one who does', ' in the disco', 'in the park. The dog is the one who does a ball in', ' in the park.'] ``` ## ByT5Tokenizer [[autodoc]] ByT5Tokenizer 詳細については、[`ByT5Tokenizer`] を参照してください。
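参考までに、トークナイザーを使わずに生のバイト ID を文字列に戻す場合の最小スケッチを示します（上記の手動エンコードの逆変換で、特殊トークン数が 3 であることを前提としています）。

```python
>>> # minimal sketch: invert the manual byte-level encoding shown above
>>> num_special_tokens = 3  # ByT5 reserves ids 0, 1, 2 for special tokens

>>> output_ids = [87, 107, 104, 35, 103, 114, 106, 49]  # example ids (= "The dog." shifted by 3)
>>> decoded = bytes(i - num_special_tokens for i in output_ids if i >= num_special_tokens)
>>> decoded.decode("utf-8", errors="ignore")
'The dog.'
```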
transformers/docs/source/ja/model_doc/byt5.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/byt5.md", "repo_id": "transformers", "token_count": 3268 }
279
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # CTRL <div class="flex flex-wrap space-x-1"> <a href="https://huggingface.co/models?filter=Salesforce/ctrl"> <img alt="Models" src="https://img.shields.io/badge/All_model_pages-ctrl-blueviolet"> </a> <a href="https://huggingface.co/spaces/docs-demos/tiny-ctrl"> <img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"> </a> </div> ## Overview CTRL モデルは、Nitish Shirish Keskar*、Bryan McCann*、Lav R. Varshney、Caiming Xiong, Richard Socher によって [CTRL: A Conditional Transformer Language Model for Controllable Generation](https://arxiv.org/abs/1909.05858) で提案されました。 リチャード・ソーチャー。これは、非常に大規模なコーパスの言語モデリングを使用して事前トレーニングされた因果的 (一方向) トランスフォーマーです 最初のトークンが制御コード (リンク、書籍、Wikipedia など) として予約されている、約 140 GB のテキスト データ。 論文の要約は次のとおりです。 *大規模な言語モデルは有望なテキスト生成機能を示していますが、ユーザーは特定の言語モデルを簡単に制御できません 生成されたテキストの側面。 16 億 3,000 万パラメータの条件付きトランスフォーマー言語モデルである CTRL をリリースします。 スタイル、コンテンツ、タスク固有の動作を制御する制御コードを条件付けるように訓練されています。制御コードは 生のテキストと自然に共生する構造から派生し、教師なし学習の利点を維持しながら、 テキスト生成をより明示的に制御できるようになります。これらのコードを使用すると、CTRL でどの部分が予測されるのかを予測することもできます。 トレーニング データにはシーケンスが与えられる可能性が最も高くなります。これにより、大量のデータを分析するための潜在的な方法が提供されます。 モデルベースのソース帰属を介して。* このモデルは、[keskarnitishr](https://huggingface.co/keskarnitishr) によって提供されました。元のコードが見つかる [こちら](https://github.com/salesforce/Salesforce/ctrl)。 ## Usage tips - CTRL は制御コードを利用してテキストを生成します。生成を特定の単語や文で開始する必要があります。 またはリンクして一貫したテキストを生成します。 [元の実装](https://github.com/salesforce/Salesforce/ctrl) を参照してください。 詳しくは。 - CTRL は絶対位置埋め込みを備えたモデルであるため、通常は入力を右側にパディングすることをお勧めします。 左。 - CTRL は因果言語モデリング (CLM) の目的でトレーニングされているため、次の予測に強力です。 シーケンス内のトークン。この機能を利用すると、CTRL は構文的に一貫したテキストを生成できるようになります。 *run_generation.py* サンプル スクリプトで確認できます。 - PyTorch モデルは、以前に計算されたキーと値のアテンション ペアである`past_key_values`を入力として受け取ることができます。 TensorFlow モデルは`past`を入力として受け入れます。 `past_key_values`値を使用すると、モデルが再計算されなくなります。 テキスト生成のコンテキストで事前に計算された値。 [`forward`](model_doc/ctrl#transformers.CTRLModel.forward) を参照してください。 この引数の使用法の詳細については、メソッドを参照してください。 ## Resources - [テキスト分類タスクガイド](../tasks/sequence_classification) - [因果言語モデリング タスク ガイド](../tasks/language_modeling) ## CTRLConfig [[autodoc]] CTRLConfig ## CTRLTokenizer [[autodoc]] CTRLTokenizer - save_vocabulary <frameworkcontent> <pt> ## CTRLModel [[autodoc]] CTRLModel - forward ## CTRLLMHeadModel [[autodoc]] CTRLLMHeadModel - forward ## CTRLForSequenceClassification [[autodoc]] CTRLForSequenceClassification - forward </pt> <tf> ## TFCTRLModel [[autodoc]] TFCTRLModel - call ## TFCTRLLMHeadModel [[autodoc]] TFCTRLLMHeadModel - call ## TFCTRLForSequenceClassification [[autodoc]] TFCTRLForSequenceClassification - call </tf> </frameworkcontent>
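参考スケッチとして、上記の「Usage tips」で触れた制御コードをプロンプトの先頭に付けて生成する例を示します。チェックポイント名 `Salesforce/ctrl` と制御コード `Wikipedia` は、この例のための想定です。

```python
>>> from transformers import CTRLLMHeadModel, CTRLTokenizer

>>> tokenizer = CTRLTokenizer.from_pretrained("Salesforce/ctrl")
>>> model = CTRLLMHeadModel.from_pretrained("Salesforce/ctrl")

>>> # prepend a control code (here "Wikipedia") to steer the style of the generated text
>>> prompt = "Wikipedia The history of the transformer architecture"
>>> input_ids = tokenizer(prompt, return_tensors="pt").input_ids

>>> # use_cache=True reuses the past key/value pairs mentioned in the tips above
>>> output_ids = model.generate(input_ids, max_new_tokens=40, do_sample=False, use_cache=True)
>>> print(tokenizer.decode(output_ids[0]))
```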
transformers/docs/source/ja/model_doc/ctrl.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/ctrl.md", "repo_id": "transformers", "token_count": 2127 }
280
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 推論のための多言語モデル [[open-in-colab]] 🤗 Transformers にはいくつかの多言語モデルがあり、それらの推論の使用方法は単一言語モデルとは異なります。ただし、多言語モデルの使用方法がすべて異なるわけではありません。 [google-bert/bert-base-multilingual-uncased](https://huggingface.co/google-bert/bert-base-multilingual-uncased) などの一部のモデルは、単一言語モデルと同様に使用できます。 このガイドでは、推論のために使用方法が異なる多言語モデルをどのように使うかを示します。 ## XLM XLM には10の異なるチェックポイントがあり、そのうちの1つだけが単一言語です。 残りの9つのモデルチェックポイントは、言語埋め込みを使用するチェックポイントと使用しないチェックポイントの2つのカテゴリに分けることができます。 ### 言語の埋め込みがある XLM 次の XLM モデルは、言語の埋め込みを使用して、推論で使用される言語を指定します。 - `FacebookAI/xlm-mlm-ende-1024` (マスク化された言語モデリング、英語-ドイツ語) - `FacebookAI/xlm-mlm-enfr-1024` (マスク化された言語モデリング、英語-フランス語) - `FacebookAI/xlm-mlm-enro-1024` (マスク化された言語モデリング、英語-ルーマニア語) - `FacebookAI/xlm-mlm-xnli15-1024` (マスク化された言語モデリング、XNLI 言語) - `FacebookAI/xlm-mlm-tlm-xnli15-1024` (マスク化された言語モデリング + 翻訳 + XNLI 言語) - `FacebookAI/xlm-clm-enfr-1024` (因果言語モデリング、英語-フランス語) - `FacebookAI/xlm-clm-ende-1024` (因果言語モデリング、英語-ドイツ語) 言語の埋め込みは、モデルに渡される `input_ids` と同じ形状のテンソルとして表されます。 これらのテンソルの値は、使用される言語に依存し、トークナイザーの `lang2id` および `id2lang` 属性によって識別されます。 この例では、`FacebookAI/xlm-clm-enfr-1024` チェックポイントをロードします (因果言語モデリング、英語-フランス語)。 ```py >>> import torch >>> from transformers import XLMTokenizer, XLMWithLMHeadModel >>> tokenizer = XLMTokenizer.from_pretrained("FacebookAI/xlm-clm-enfr-1024") >>> model = XLMWithLMHeadModel.from_pretrained("FacebookAI/xlm-clm-enfr-1024") ``` トークナイザーの `lang2id` 属性は、このモデルの言語とその ID を表示します。 ```py >>> print(tokenizer.lang2id) {'en': 0, 'fr': 1} ``` 次に、入力例を作成します。 ```py >>> input_ids = torch.tensor([tokenizer.encode("Wikipedia was used to")]) # batch size of 1 ``` 言語 ID を `en` に設定し、それを使用して言語の埋め込みを定義します。 言語の埋め込みは、英語の言語 ID であるため、`0` で埋められたテンソルです。 このテンソルは `input_ids` と同じサイズにする必要があります。 ```py >>> language_id = tokenizer.lang2id["en"] # 0 >>> langs = torch.tensor([language_id] * input_ids.shape[1]) # torch.tensor([0, 0, 0, ..., 0]) >>> # We reshape it to be of size (batch_size, sequence_length) >>> langs = langs.view(1, -1) # is now of shape [1, sequence_length] (we have a batch size of 1) ``` これで、`input_ids` と言語の埋め込みをモデルに渡すことができます。 ```py >>> outputs = model(input_ids, langs=langs) ``` [run_generation.py](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-generation/run_generation.py) スクリプトは、`xlm-clm` チェックポイントを使用して、言語が埋め込まれたテキストを生成できます。 ### 言語の埋め込みがないXLM 次の XLM モデルは、推論中に言語の埋め込みを必要としません。 - `FacebookAI/xlm-mlm-17-1280` (マスク化された言語モデリング、17の言語) - `FacebookAI/xlm-mlm-100-1280` (マスク化された言語モデリング、100の言語) これらのモデルは、以前の XLM チェックポイントとは異なり、一般的な文の表現に使用されます。 ## BERT 以下の BERT モデルは、多言語タスクに使用できます。 - `google-bert/bert-base-multilingual-uncased` (マスク化された言語モデリング + 次の文の予測、102の言語) - `google-bert/bert-base-multilingual-cased` (マスク化された言語モデリング + 次の文の予測、104の言語) これらのモデルは、推論中に言語の埋め込みを必要としません。 文脈から言語を識別し、それに応じて推測する必要があります。 ## XLM-RoBERTa 次の XLM-RoBERTa モデルは、多言語タスクに使用できます。 - 
`FacebookAI/xlm-roberta-base` (マスク化された言語モデリング、100の言語) - `FacebookAI/xlm-roberta-large` (マスク化された言語モデリング、100の言語) XLM-RoBERTa は、100の言語で新しく作成およびクリーニングされた2.5 TB の CommonCrawl データでトレーニングされました。 これは、分類、シーケンスのラベル付け、質問応答などのダウンストリームタスクで、mBERT や XLM などの以前にリリースされた多言語モデルを大幅に改善します。 ## M2M100 次の M2M100 モデルは、多言語翻訳に使用できます。 - `facebook/m2m100_418M` (翻訳) - `facebook/m2m100_1.2B` (翻訳) この例では、`facebook/m2m100_418M` チェックポイントをロードして、中国語から英語に翻訳します。 トークナイザーでソース言語を設定できます。 ```py >>> from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer >>> en_text = "Do not meddle in the affairs of wizards, for they are subtle and quick to anger." >>> chinese_text = "不要插手巫師的事務, 因為他們是微妙的, 很快就會發怒." >>> tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="zh") >>> model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M") ``` テキストをトークン化します。 ```py >>> encoded_zh = tokenizer(chinese_text, return_tensors="pt") ``` M2M100 は、最初に生成されたトークンとしてターゲット言語 ID を強制的にターゲット言語に翻訳します。 英語に翻訳するには、`generate` メソッドで `forced_bos_token_id` を `en` に設定します。 ```py >>> generated_tokens = model.generate(**encoded_zh, forced_bos_token_id=tokenizer.get_lang_id("en")) >>> tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) 'Do not interfere with the matters of the witches, because they are delicate and will soon be angry.' ``` ## MBart 多言語翻訳には、次の MBart モデルを使用できます。 - `facebook/mbart-large-50-one-to-many-mmt` (One-to-many multilingual machine translation, 50 languages) - `facebook/mbart-large-50-many-to-many-mmt` (Many-to-many multilingual machine translation, 50 languages) - `facebook/mbart-large-50-many-to-one-mmt` (Many-to-one multilingual machine translation, 50 languages) - `facebook/mbart-large-50` (Multilingual translation, 50 languages) - `facebook/mbart-large-cc25` この例では、`facebook/mbart-large-50-many-to-many-mmt` チェックポイントをロードして、フィンランド語を英語に翻訳します。トークナイザーでソース言語を設定できます。 ```py >>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM >>> en_text = "Do not meddle in the affairs of wizards, for they are subtle and quick to anger." >>> fi_text = "Älä sekaannu velhojen asioihin, sillä ne ovat hienovaraisia ja nopeasti vihaisia." >>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-50-many-to-many-mmt", src_lang="fi_FI") >>> model = AutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-50-many-to-many-mmt") ``` テキストをトークン化します。 ```py >>> encoded_en = tokenizer(en_text, return_tensors="pt") ``` MBart は、最初に生成されたトークンとしてターゲット言語 ID を強制的にターゲット言語に翻訳します。 英語に翻訳するには、`generate` メソッドで `forced_bos_token_id` を `en` に設定します。 ```py >>> generated_tokens = model.generate(**encoded_en, forced_bos_token_id=tokenizer.lang_code_to_id("en_XX")) >>> tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) "Don't interfere with the wizard's affairs, because they are subtle, will soon get angry." ``` `facebook/mbart-large-50-many-to-one-mmt` チェックポイントを使用している場合、最初に生成されたトークンとしてターゲット言語 ID を強制する必要はありません。それ以外の場合、使用方法は同じです。
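参考として、最後に触れた `facebook/mbart-large-50-many-to-one-mmt` を使用する場合の最小スケッチを示します。ターゲット言語は常に英語なので、`forced_bos_token_id` は指定しません。

```py
>>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

>>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-50-many-to-one-mmt", src_lang="fi_FI")
>>> model = AutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-50-many-to-one-mmt")

>>> fi_text = "Älä sekaannu velhojen asioihin, sillä ne ovat hienovaraisia ja nopeasti vihaisia."
>>> encoded_fi = tokenizer(fi_text, return_tensors="pt")

>>> # the target language is always English, so no forced_bos_token_id is needed
>>> generated_tokens = model.generate(**encoded_fi)
>>> tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
```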
transformers/docs/source/ja/multilingual.md/0
{ "file_path": "transformers/docs/source/ja/multilingual.md", "repo_id": "transformers", "token_count": 4144 }
281
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Video classification [[open-in-colab]] ビデオ分類は、ビデオ全体にラベルまたはクラスを割り当てるタスクです。ビデオには、各ビデオに 1 つのクラスのみが含まれることが期待されます。ビデオ分類モデルはビデオを入力として受け取り、ビデオがどのクラスに属するかについての予測を返します。これらのモデルを使用して、ビデオの内容を分類できます。ビデオ分類の実際のアプリケーションはアクション/アクティビティ認識であり、フィットネス アプリケーションに役立ちます。また、視覚障害のある人にとって、特に通勤時に役立ちます。 このガイドでは、次の方法を説明します。 1. [UCF101](https://www.crcv.ucf.edu/) のサブセットで [VideoMAE](https://huggingface.co/docs/transformers/main/en/model_doc/videomae) を微調整します。 data/UCF101.php) データセット。 2. 微調整したモデルを推論に使用します。 <Tip> このチュートリアルで説明するタスクは、次のモデル アーキテクチャでサポートされています。 <!--This tip is automatically generated by `make fix-copies`, do not fill manually!--> [TimeSformer](../model_doc/timesformer), [VideoMAE](../model_doc/videomae), [ViViT](../model_doc/vivit) <!--End of the generated tip--> </Tip> 始める前に、必要なライブラリがすべてインストールされていることを確認してください。 ```bash pip install -q pytorchvideo transformers evaluate ``` [PyTorchVideo](https://pytorchvideo.org/) (`pytorchvideo` と呼ばれます) を使用してビデオを処理し、準備します。 モデルをアップロードしてコミュニティと共有できるように、Hugging Face アカウントにログインすることをお勧めします。プロンプトが表示されたら、トークンを入力してログインします。 ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` ## Load UCF101 dataset まず、[UCF-101 データセット](https://www.crcv.ucf.edu/data/UCF101.php) のサブセットをロードします。これにより、完全なデータセットのトレーニングにさらに時間を費やす前に、実験してすべてが機能することを確認する機会が得られます。 ```py >>> from huggingface_hub import hf_hub_download >>> hf_dataset_identifier = "sayakpaul/ucf101-subset" >>> filename = "UCF101_subset.tar.gz" >>> file_path = hf_hub_download(repo_id=hf_dataset_identifier, filename=filename, repo_type="dataset") ``` サブセットをダウンロードした後、圧縮アーカイブを抽出する必要があります。 ```py >>> import tarfile >>> with tarfile.open(file_path) as t: ... t.extractall(".") ``` 大まかに言うと、データセットは次のように構成されています。 ```bash UCF101_subset/ train/ BandMarching/ video_1.mp4 video_2.mp4 ... Archery video_1.mp4 video_2.mp4 ... ... val/ BandMarching/ video_1.mp4 video_2.mp4 ... Archery video_1.mp4 video_2.mp4 ... ... test/ BandMarching/ video_1.mp4 video_2.mp4 ... Archery video_1.mp4 video_2.mp4 ... ... ``` (`sorted`)された ビデオ パスは次のように表示されます。 ```bash ... 'UCF101_subset/train/ApplyEyeMakeup/v_ApplyEyeMakeup_g07_c04.avi', 'UCF101_subset/train/ApplyEyeMakeup/v_ApplyEyeMakeup_g07_c06.avi', 'UCF101_subset/train/ApplyEyeMakeup/v_ApplyEyeMakeup_g08_c01.avi', 'UCF101_subset/train/ApplyEyeMakeup/v_ApplyEyeMakeup_g09_c02.avi', 'UCF101_subset/train/ApplyEyeMakeup/v_ApplyEyeMakeup_g09_c06.avi' ... 
``` 同じグループ/シーンに属するビデオ クリップがあり、ビデオ ファイル パスではグループが`g`で示されていることがわかります。たとえば、`v_ApplyEyeMakeup_g07_c04.avi`や`v_ApplyEyeMakeup_g07_c06.avi`などです。 検証と評価の分割では、[データ漏洩](https://www.kaggle.com/code/alexisbcook/data-leakage) を防ぐために、同じグループ/シーンからのビデオ クリップを使用しないでください。このチュートリアルで使用しているサブセットでは、この情報が考慮されています。 次に、データセット内に存在するラベルのセットを取得します。また、モデルを初期化するときに役立つ 2 つの辞書を作成します。 * `label2id`: クラス名を整数にマップします。 * `id2label`: 整数をクラス名にマッピングします。 ```py >>> class_labels = sorted({str(path).split("/")[2] for path in all_video_file_paths}) >>> label2id = {label: i for i, label in enumerate(class_labels)} >>> id2label = {i: label for label, i in label2id.items()} >>> print(f"Unique classes: {list(label2id.keys())}.") # Unique classes: ['ApplyEyeMakeup', 'ApplyLipstick', 'Archery', 'BabyCrawling', 'BalanceBeam', 'BandMarching', 'BaseballPitch', 'Basketball', 'BasketballDunk', 'BenchPress']. ``` 個性的なクラスが10種類あります。トレーニング セットには、クラスごとに 30 個のビデオがあります。 ## Load a model to fine-tune 事前トレーニングされたチェックポイントとそれに関連する画像プロセッサからビデオ分類モデルをインスタンス化します。モデルのエンコーダーには事前トレーニングされたパラメーターが付属しており、分類ヘッドはランダムに初期化されます。画像プロセッサは、データセットの前処理パイプラインを作成するときに役立ちます。 ```py >>> from transformers import VideoMAEImageProcessor, VideoMAEForVideoClassification >>> model_ckpt = "MCG-NJU/videomae-base" >>> image_processor = VideoMAEImageProcessor.from_pretrained(model_ckpt) >>> model = VideoMAEForVideoClassification.from_pretrained( ... model_ckpt, ... label2id=label2id, ... id2label=id2label, ... ignore_mismatched_sizes=True, # provide this in case you're planning to fine-tune an already fine-tuned checkpoint ... ) ``` モデルのロード中に、次の警告が表示される場合があります。 ```bash Some weights of the model checkpoint at MCG-NJU/videomae-base were not used when initializing VideoMAEForVideoClassification: [..., 'decoder.decoder_layers.1.attention.output.dense.bias', 'decoder.decoder_layers.2.attention.attention.key.weight'] - This IS expected if you are initializing VideoMAEForVideoClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model). - This IS NOT expected if you are initializing VideoMAEForVideoClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model). Some weights of VideoMAEForVideoClassification were not initialized from the model checkpoint at MCG-NJU/videomae-base and are newly initialized: ['classifier.bias', 'classifier.weight'] You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. ``` この警告は、一部の重み (たとえば、`classifier`層の重みとバイアス) を破棄し、他のいくつかの重み (新しい`classifier`層の重みとバイアス) をランダムに初期化していることを示しています。この場合、これは予想されることです。事前にトレーニングされた重みを持たない新しい頭部を追加しているため、推論に使用する前にこのモデルを微調整する必要があるとライブラリが警告します。これはまさに私たちが行おうとしているものです。する。 **注意** [このチェックポイント](https://huggingface.co/MCG-NJU/videomae-base-finetuned-kinetics) は、同様のダウンストリームで微調整されてチェックポイントが取得されたため、このタスクのパフォーマンスが向上することに注意してください。かなりのドメインの重複があるタスク。 `MCG-NJU/videomae-base-finetuned-kinetics` を微調整して取得した [このチェックポイント](https://huggingface.co/sayakpaul/videomae-base-finetuned-kinetics-finetuned-ucf101-subset) を確認できます。 -キネティクス`。 ## Prepare the datasets for training ビデオの前処理には、[PyTorchVideo ライブラリ](https://pytorchvideo.org/) を利用します。まず、必要な依存関係をインポートします。 ```py >>> import pytorchvideo.data >>> from pytorchvideo.transforms import ( ... ApplyTransformToKey, ... Normalize, ... RandomShortSideScale, ... RemoveKey, ... ShortSideScale, ... UniformTemporalSubsample, ... 
) >>> from torchvision.transforms import ( ... Compose, ... Lambda, ... RandomCrop, ... RandomHorizontalFlip, ... Resize, ... ) ``` トレーニング データセットの変換には、均一な時間サブサンプリング、ピクセル正規化、ランダム クロッピング、およびランダムな水平反転を組み合わせて使用​​します。検証および評価のデータセット変換では、ランダムなトリミングと水平反転を除き、同じ変換チェーンを維持します。これらの変換の詳細については、[PyTorchVideo の公式ドキュメント](https://pytorchvideo.org) を参照してください。 事前トレーニングされたモデルに関連付けられた`image_processor`を使用して、次の情報を取得します。 * ビデオ フレームのピクセルが正規化される画像の平均値と標準偏差。 * ビデオ フレームのサイズが変更される空間解像度。 まず、いくつかの定数を定義します。 ```py >>> mean = image_processor.image_mean >>> std = image_processor.image_std >>> if "shortest_edge" in image_processor.size: ... height = width = image_processor.size["shortest_edge"] >>> else: ... height = image_processor.size["height"] ... width = image_processor.size["width"] >>> resize_to = (height, width) >>> num_frames_to_sample = model.config.num_frames >>> sample_rate = 4 >>> fps = 30 >>> clip_duration = num_frames_to_sample * sample_rate / fps ``` 次に、データセット固有の変換とデータセットをそれぞれ定義します。トレーニングセットから始めます: ```py >>> train_transform = Compose( ... [ ... ApplyTransformToKey( ... key="video", ... transform=Compose( ... [ ... UniformTemporalSubsample(num_frames_to_sample), ... Lambda(lambda x: x / 255.0), ... Normalize(mean, std), ... RandomShortSideScale(min_size=256, max_size=320), ... RandomCrop(resize_to), ... RandomHorizontalFlip(p=0.5), ... ] ... ), ... ), ... ] ... ) >>> train_dataset = pytorchvideo.data.Ucf101( ... data_path=os.path.join(dataset_root_path, "train"), ... clip_sampler=pytorchvideo.data.make_clip_sampler("random", clip_duration), ... decode_audio=False, ... transform=train_transform, ... ) ``` 同じ一連のワークフローを検証セットと評価セットに適用できます。 ```py >>> val_transform = Compose( ... [ ... ApplyTransformToKey( ... key="video", ... transform=Compose( ... [ ... UniformTemporalSubsample(num_frames_to_sample), ... Lambda(lambda x: x / 255.0), ... Normalize(mean, std), ... Resize(resize_to), ... ] ... ), ... ), ... ] ... ) >>> val_dataset = pytorchvideo.data.Ucf101( ... data_path=os.path.join(dataset_root_path, "val"), ... clip_sampler=pytorchvideo.data.make_clip_sampler("uniform", clip_duration), ... decode_audio=False, ... transform=val_transform, ... ) >>> test_dataset = pytorchvideo.data.Ucf101( ... data_path=os.path.join(dataset_root_path, "test"), ... clip_sampler=pytorchvideo.data.make_clip_sampler("uniform", clip_duration), ... decode_audio=False, ... transform=val_transform, ... ) ``` **注意**: 上記のデータセット パイプラインは、[公式 PyTorchVideo サンプル](https://pytorchvideo.org/docs/tutorial_classification#dataset) から取得したものです。 [`pytorchvideo.data.Ucf101()`](https://pytorchvideo.readthedocs.io/en/latest/api/data/data.html#pytorchvideo.data.Ucf101) 関数を使用しています。 UCF-101 データセット。内部では、[`pytorchvideo.data.labeled_video_dataset.LabeledVideoDataset`](https://pytorchvideo.readthedocs.io/en/latest/api/data/data.html#pytorchvideo.data.LabeledVideoDataset) オブジェクトを返します。 `LabeledVideoDataset` クラスは、PyTorchVideo データセット内のすべてのビデオの基本クラスです。したがって、PyTorchVideo で既製でサポートされていないカスタム データセットを使用したい場合は、それに応じて `LabeledVideoDataset` クラスを拡張できます。詳細については、`data`API [ドキュメント](https://pytorchvideo.readthedocs.io/en/latest/api/data/data.html)を参照してください。また、データセットが同様の構造 (上に示したもの) に従っている場合は、`pytorchvideo.data.Ucf101()` を使用すると問題なく動作するはずです。 `num_videos` 引数にアクセスすると、データセット内のビデオの数を知ることができます。 ```py >>> print(train_dataset.num_videos, val_dataset.num_videos, test_dataset.num_videos) # (300, 30, 75) ``` ## Visualize the preprocessed video for better debugging ```py >>> import imageio >>> import numpy as np >>> from IPython.display import Image >>> def unnormalize_img(img): ... 
"""Un-normalizes the image pixels.""" ... img = (img * std) + mean ... img = (img * 255).astype("uint8") ... return img.clip(0, 255) >>> def create_gif(video_tensor, filename="sample.gif"): ... """Prepares a GIF from a video tensor. ... ... The video tensor is expected to have the following shape: ... (num_frames, num_channels, height, width). ... """ ... frames = [] ... for video_frame in video_tensor: ... frame_unnormalized = unnormalize_img(video_frame.permute(1, 2, 0).numpy()) ... frames.append(frame_unnormalized) ... kargs = {"duration": 0.25} ... imageio.mimsave(filename, frames, "GIF", **kargs) ... return filename >>> def display_gif(video_tensor, gif_name="sample.gif"): ... """Prepares and displays a GIF from a video tensor.""" ... video_tensor = video_tensor.permute(1, 0, 2, 3) ... gif_filename = create_gif(video_tensor, gif_name) ... return Image(filename=gif_filename) >>> sample_video = next(iter(train_dataset)) >>> video_tensor = sample_video["video"] >>> display_gif(video_tensor) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/sample_gif.gif" alt="Person playing basketball"/> </div> ## Train the model 🤗 Transformers の [`Trainer`](https://huggingface.co/docs/transformers/main_classes/trainer) をモデルのトレーニングに利用します。 `Trainer`をインスタンス化するには、トレーニング構成と評価メトリクスを定義する必要があります。最も重要なのは [`TrainingArguments`](https://huggingface.co/transformers/main_classes/trainer.html#transformers.TrainingArguments) で、これはトレーニングを構成するためのすべての属性を含むクラスです。モデルのチェックポイントを保存するために使用される出力フォルダー名が必要です。また、🤗 Hub 上のモデル リポジトリ内のすべての情報を同期するのにも役立ちます。 トレーニング引数のほとんどは一目瞭然ですが、ここで非常に重要なのは`remove_unused_columns=False`です。これにより、モデルの呼び出し関数で使用されない機能が削除されます。デフォルトでは`True`です。これは、通常、未使用の特徴列を削除し、モデルの呼び出し関数への入力を解凍しやすくすることが理想的であるためです。ただし、この場合、`pixel_values` (モデルが入力で期待する必須キーです) を作成するには、未使用の機能 (特に`video`) が必要です。 ```py >>> from transformers import TrainingArguments, Trainer >>> model_name = model_ckpt.split("/")[-1] >>> new_model_name = f"{model_name}-finetuned-ucf101-subset" >>> num_epochs = 4 >>> args = TrainingArguments( ... new_model_name, ... remove_unused_columns=False, ... evaluation_strategy="epoch", ... save_strategy="epoch", ... learning_rate=5e-5, ... per_device_train_batch_size=batch_size, ... per_device_eval_batch_size=batch_size, ... warmup_ratio=0.1, ... logging_steps=10, ... load_best_model_at_end=True, ... metric_for_best_model="accuracy", ... push_to_hub=True, ... max_steps=(train_dataset.num_videos // batch_size) * num_epochs, ... ) ``` `pytorchvideo.data.Ucf101()` によって返されるデータセットは `__len__` メソッドを実装していません。そのため、`TrainingArguments`をインスタンス化するときに`max_steps`を定義する必要があります。 次に、予測からメトリクスを計算する関数を定義する必要があります。これは、これからロードする`metric`を使用します。必要な前処理は、予測されたロジットの argmax を取得することだけです。 ```py import evaluate metric = evaluate.load("accuracy") def compute_metrics(eval_pred): predictions = np.argmax(eval_pred.predictions, axis=1) return metric.compute(predictions=predictions, references=eval_pred.label_ids) ``` **評価に関する注意事項**: [VideoMAE 論文](https://arxiv.org/abs/2203.12602) では、著者は次の評価戦略を使用しています。彼らはテスト ビデオからのいくつかのクリップでモデルを評価し、それらのクリップにさまざまなクロップを適用して、合計スコアを報告します。ただし、単純さと簡潔さを保つために、このチュートリアルではそれを考慮しません。 また、サンプルをまとめてバッチ処理するために使用される `collat​​e_fn` を定義します。各バッチは、`pixel_values` と `labels` という 2 つのキーで構成されます。 ```py >>> def collate_fn(examples): ... # permute to (num_frames, num_channels, height, width) ... pixel_values = torch.stack( ... [example["video"].permute(1, 0, 2, 3) for example in examples] ... ) ... 
labels = torch.tensor([example["label"] for example in examples]) ... return {"pixel_values": pixel_values, "labels": labels} ``` 次に、これらすべてをデータセットとともに`Trainer`に渡すだけです。 ```py >>> trainer = Trainer( ... model, ... args, ... train_dataset=train_dataset, ... eval_dataset=val_dataset, ... tokenizer=image_processor, ... compute_metrics=compute_metrics, ... data_collator=collate_fn, ... ) ``` すでにデータを前処理しているのに、なぜトークナイザーとして`image_processor`を渡したのか不思議に思うかもしれません。これは、イメージ プロセッサ構成ファイル (JSON として保存) もハブ上のリポジトリにアップロードされるようにするためだけです。 次に、`train` メソッドを呼び出してモデルを微調整します。 ```py >>> train_results = trainer.train() ``` トレーニングが完了したら、 [`~transformers.Trainer.push_to_hub`] メソッドを使用してモデルをハブに共有し、誰もがモデルを使用できるようにします。 ```py >>> trainer.push_to_hub() ``` ## Inference モデルを微調整したので、それを推論に使用できるようになりました。 推論のためにビデオをロードします。 ```py >>> sample_test_video = next(iter(test_dataset)) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/sample_gif_two.gif" alt="Teams playing basketball"/> </div> 推論用に微調整されたモデルを試す最も簡単な方法は、それを [`pipeline`](https://huggingface.co/docs/transformers/main/en/main_classes/pipelines#transformers.VideoClassificationPipeline). で使用することです。モデルを使用してビデオ分類用の` pipeline`をインスタンス化し、それにビデオを渡します。 ```py >>> from transformers import pipeline >>> video_cls = pipeline(model="my_awesome_video_cls_model") >>> video_cls("https://huggingface.co/datasets/sayakpaul/ucf101-subset/resolve/main/v_BasketballDunk_g14_c06.avi") [{'score': 0.9272987842559814, 'label': 'BasketballDunk'}, {'score': 0.017777055501937866, 'label': 'BabyCrawling'}, {'score': 0.01663011871278286, 'label': 'BalanceBeam'}, {'score': 0.009560945443809032, 'label': 'BandMarching'}, {'score': 0.0068979403004050255, 'label': 'BaseballPitch'}] ``` 必要に応じて、`pipeline`の結果を手動で複製することもできます。 ```py >>> def run_inference(model, video): ... # (num_frames, num_channels, height, width) ... perumuted_sample_test_video = video.permute(1, 0, 2, 3) ... inputs = { ... "pixel_values": perumuted_sample_test_video.unsqueeze(0), ... "labels": torch.tensor( ... [sample_test_video["label"]] ... ), # this can be skipped if you don't have labels available. ... } ... device = torch.device("cuda" if torch.cuda.is_available() else "cpu") ... inputs = {k: v.to(device) for k, v in inputs.items()} ... model = model.to(device) ... # forward pass ... with torch.no_grad(): ... outputs = model(**inputs) ... logits = outputs.logits ... return logits ``` 次に、入力をモデルに渡し、`logits `を返します。 ```py >>> logits = run_inference(trained_model, sample_test_video["video"]) ``` `logits` をデコードすると、次のようになります。 ```py >>> predicted_class_idx = logits.argmax(-1).item() >>> print("Predicted class:", model.config.id2label[predicted_class_idx]) # Predicted class: BasketballDunk ```
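ロジットをクラスごとの確率として確認したい場合の参考スケッチです（ソフトマックスを適用して上位 3 クラスを表示します）。

```py
>>> # convert the logits to probabilities and show the top-3 classes
>>> probs = logits.softmax(-1)[0]
>>> top = probs.topk(3)
>>> for score, idx in zip(top.values.tolist(), top.indices.tolist()):
...     print(f"{model.config.id2label[idx]}: {score:.4f}")
```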
transformers/docs/source/ja/tasks/video_classification.md/0
{ "file_path": "transformers/docs/source/ja/tasks/video_classification.md", "repo_id": "transformers", "token_count": 10074 }
282
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Hugging Face Transformers를 추가하는 방법은 무엇인가요? [[how-to-add-a-model-to-transformers]] Hugging Face Transformers 라이브러리는 커뮤니티 기여자들 덕분에 새로운 모델을 제공할 수 있는 경우가 많습니다. 하지만 이는 도전적인 프로젝트이며 Hugging Face Transformers 라이브러리와 구현할 모델에 대한 깊은 이해가 필요합니다. Hugging Face에서는 더 많은 커뮤니티 멤버가 모델을 적극적으로 추가할 수 있도록 지원하고자 하며, 이 가이드를 통해 PyTorch 모델을 추가하는 과정을 안내하고 있습니다 (PyTorch가 설치되어 있는지 확인해주세요). <Tip> TensorFlow 모델을 구현하고자 하는 경우 [🤗 Transformers 모델을 TensorFlow로 변환하는 방법](add_tensorflow_model) 가이드를 살펴보세요! </Tip> 이 과정을 진행하면 다음과 같은 내용을 이해하게 됩니다: - 오픈 소스의 모범 사례에 대한 통찰력을 얻습니다. - 가장 인기 있는 딥러닝 라이브러리의 설계 원칙을 이해합니다. - 대규모 모델을 효율적으로 테스트하는 방법을 배웁니다. - `black`, `ruff`, `make fix-copies`와 같은 Python 유틸리티를 통합하여 깔끔하고 가독성 있는 코드를 작성하는 방법을 배웁니다. Hugging Face 팀은 항상 도움을 줄 준비가 되어 있으므로 혼자가 아니라는 점을 기억하세요. 🤗 ❤️ 시작에 앞서 🤗 Transformers에 원하는 모델을 추가하기 위해 [New model addition](https://github.com/huggingface/transformers/issues/new?assignees=&labels=New+model&template=new-model-addition.yml) 이슈를 열어야 합니다. 특정 모델을 기여하는 데 특별히 까다로운 기준을 가지지 않는 경우 [New model label](https://github.com/huggingface/transformers/labels/New%20model)을 필터링하여 요청되지 않은 모델이 있는지 확인하고 작업할 수 있습니다. 새로운 모델 요청을 열었다면 첫 번째 단계는 🤗 Transformers에 익숙해지는 것입니다! ## 🤗 Transformers의 전반적인 개요 [[general-overview-of-transformers]] 먼저 🤗 Transformers에 대한 전반적인 개요를 파악해야 합니다. 🤗 Transformers는 매우 주관적인 라이브러리이기 때문에 해당 라이브러리의 철학이나 설계 선택 사항에 동의하지 않을 수도 있습니다. 그러나 우리의 경험상 라이브러리의 기본적인 설계 선택과 철학은 🤗 Transformers의 규모를 효율적으로 확장하면서 유지 보수 비용을 합리적인 수준으로 유지하는 것입니다. [라이브러리의 철학에 대한 문서](philosophy)를 읽는 것이 라이브러리를 더 잘 이해하는 좋은 시작점입니다. 모든 모델에 적용하려는 몇 가지 작업 방식에 대한 선택 사항이 있습니다: - 일반적으로 추상화보다는 구성을 선호합니다. - 코드를 복제하는 것이 항상 나쁜 것은 아닙니다. 코드의 가독성이나 접근성을 크게 향상시킨다면 복제하는 것은 좋습니다. - 모델 파일은 가능한 한 독립적으로 유지되어야 합니다. 따라서 특정 모델의 코드를 읽을 때 해당 `modeling_....py` 파일만 확인하면 됩니다. 우리는 라이브러리의 코드가 제품을 제공하는 수단뿐만 아니라 개선하고자 하는 제품이라고도 생각합니다. 따라서 모델을 추가할 때, 사용자는 모델을 사용할 사람뿐만 아니라 코드를 읽고 이해하고 필요한 경우 조정할 수 있는 모든 사람까지도 포함한다는 점을 기억해야 합니다. 이를 염두에 두고 일반적인 라이브러리 설계에 대해 조금 더 자세히 알아보겠습니다. ### 모델 개요 [[overview-of-models]] 모델을 성공적으로 추가하려면 모델과 해당 구성인 [`PreTrainedModel`] 및 [`PretrainedConfig`] 간의 상호작용을 이해하는 것이 중요합니다. 예를 들어, 🤗 Transformers에 추가하려는 모델을 `BrandNewBert`라고 부르겠습니다. 다음을 살펴보겠습니다: <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers_overview.png"/> 보다시피, 🤗 Transformers에서는 상속을 사용하지만 추상화 수준을 최소한으로 유지합니다. 라이브러리의 어떤 모델에서도 두 수준 이상의 추상화가 존재하지 않습니다. `BrandNewBertModel`은 `BrandNewBertPreTrainedModel`에서 상속받고, 이 클래스는 [`PreTrainedModel`]에서 상속받습니다. 이로써 새로운 모델은 [`PreTrainedModel`]에만 의존하도록 하려고 합니다. 모든 새로운 모델에 자동으로 제공되는 중요한 기능은 [`~PreTrainedModel.from_pretrained`] 및 [`~PreTrainedModel.save_pretrained`]입니다. 이러한 기능 외에도 `BrandNewBertModel.forward`와 같은 다른 중요한 기능은 새로운 `modeling_brand_new_bert.py` 스크립트에서 완전히 정의되어야 합니다. 또한 `BrandNewBertForMaskedLM`과 같은 특정 헤드 레이어를 가진 모델은 `BrandNewBertModel`을 상속받지 않고 forward pass에서 호출할 수 있는 `BrandNewBertModel`을 사용하여 추상화 수준을 낮게 유지합니다. 
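이 구성(composition) 패턴을 보여주는 참고용 스케치는 다음과 같습니다. 클래스 구조와 `lm_head` 같은 속성 이름은 설명을 위한 가정일 뿐입니다:

```python
from torch import nn

# Illustrative sketch only: a head model *uses* BrandNewBertModel in its forward pass
# instead of subclassing it, which keeps the abstraction level low.
class BrandNewBertForMaskedLM(BrandNewBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.model = BrandNewBertModel(config)  # composition, not inheritance
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

    def forward(self, input_ids, attention_mask=None):
        hidden_states = self.model(input_ids, attention_mask=attention_mask)[0]
        return self.lm_head(hidden_states)
```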
모든 새로운 모델은 `BrandNewBertConfig`라는 구성 클래스를 필요로 합니다. 이 구성은 항상 [`PreTrainedModel`]의 속성으로 저장되며, 따라서 `BrandNewBertPreTrainedModel`을 상속받는 모든 클래스에서 `config` 속성을 통해 액세스할 수 있습니다: ```python model = BrandNewBertModel.from_pretrained("brandy/brand_new_bert") model.config # model has access to its config ``` 모델과 마찬가지로 구성은 [`PretrainedConfig`]에서 기본 직렬화 및 역직렬화 기능을 상속받습니다. 구성과 모델은 항상 *pytorch_model.bin* 파일과 *config.json* 파일로 각각 별도로 직렬화됩니다. [`~PreTrainedModel.save_pretrained`]를 호출하면 자동으로 [`~PretrainedConfig.save_pretrained`]도 호출되므로 모델과 구성이 모두 저장됩니다. ### 코드 스타일 [[code-style]] 새로운 모델을 작성할 때, Transformers는 주관적인 라이브러리이며 몇 가지 독특한 코딩 스타일이 있습니다: 1. 모델의 forward pass는 모델 파일에 완전히 작성되어야 합니다. 라이브러리의 다른 모델에서 블록을 재사용하려면 코드를 복사하여 위에 `# Copied from` 주석과 함께 붙여넣으면 됩니다 (예: [여기](https://github.com/huggingface/transformers/blob/v4.17.0/src/transformers/models/roberta/modeling_roberta.py#L160)를 참조하세요). 2. 코드는 완전히 이해하기 쉬워야 합니다. 변수 이름을 명확하게 지정하고 약어를 사용하지 않는 것이 좋습니다. 예를 들어, `act`보다는 `activation`을 선호합니다. 한 글자 변수 이름은 루프의 인덱스인 경우를 제외하고 권장되지 않습니다. 3. 더 일반적으로, 짧은 마법 같은 코드보다는 길고 명시적인 코드를 선호합니다. 4. PyTorch에서 `nn.Sequential`을 하위 클래스로 만들지 말고 `nn.Module`을 하위 클래스로 만들고 forward pass를 작성하여 다른 사람이 코드를 빠르게 디버그할 수 있도록 합니다. print 문이나 중단점을 추가할 수 있습니다. 5. 함수 시그니처에는 타입 주석을 사용해야 합니다. 그 외에는 타입 주석보다 변수 이름이 훨씬 읽기 쉽고 이해하기 쉽습니다. ### 토크나이저 개요 [[overview-of-tokenizers]] 아직 준비되지 않았습니다 :-( 이 섹션은 곧 추가될 예정입니다! ## 🤗 Transformers에 모델 추가하는 단계별 방법 [[stepbystep-recipe-to-add-a-model-to-transformers]] 각자 모델을 이식하는 방법에 대한 선호가 다르기 때문에 다른 기여자들이 Hugging Face에 모델을 이식하는 방법에 대한 요약을 살펴보는 것이 매우 유용할 수 있습니다. 다음은 모델을 이식하는 방법에 대한 커뮤니티 블로그 게시물 목록입니다: 1. [GPT2 모델 이식하기](https://medium.com/huggingface/from-tensorflow-to-pytorch-265f40ef2a28) - [Thomas](https://huggingface.co/thomwolf) 2. [WMT19 MT 모델 이식하기](https://huggingface.co/blog/porting-fsmt) - [Stas](https://huggingface.co/stas) 경험상 모델을 추가할 때 주의해야 할 가장 중요한 사항은 다음과 같습니다: - 같은 일을 반복하지 마세요! 새로운 🤗 Transformers 모델을 위해 추가할 코드의 대부분은 이미 🤗 Transformers 어딘가에 존재합니다. 이미 존재하는 복사할 수 있는 유사한 모델과 토크나이저를 찾는데 시간을 투자하세요. [grep](https://www.gnu.org/software/grep/)와 [rg](https://github.com/BurntSushi/ripgrep)를 참고하세요. 모델의 토크나이저가 한 모델을 기반으로 하고 모델링 코드가 다른 모델을 기반으로 하는 경우가 존재할 수도 있습니다. 예를 들어 FSMT의 모델링 코드는 BART를 기반으로 하고 FSMT의 토크나이저 코드는 XLM을 기반으로 합니다. - 이것은 과학적인 도전보다는 공학적인 도전입니다. 논문의 모델의 모든 이론적 측면을 이해하려는 것보다 효율적인 디버깅 환경을 만드는 데 더 많은 시간을 소비해야 합니다. - 막힐 때 도움을 요청하세요! 모델은 🤗 Transformers의 핵심 구성 요소이므로 Hugging Face의 우리는 당신이 모델을 추가하는 각 단계에서 기꺼이 도움을 줄 준비가 되어 있습니다. 진전이 없다고 느끼면 주저하지 말고 도움을 요청하세요. 다음에서는 모델을 🤗 Transformers로 이식하는 데 가장 유용한 일반적인 절차를 제공하려고 노력합니다. 다음 목록은 모델을 추가하는 데 수행해야 할 모든 작업의 요약이며 To-Do 목록으로 사용할 수 있습니다: ☐ (선택 사항) BrandNewBert의 이론적 측면 이해<br> ☐ Hugging Face 개발 환경 준비<br> ☐ 원본 리포지토리의 디버깅 환경 설정<br> ☐ 원본 리포지토리와 체크포인트를 사용하여 `forward()` pass가 성공적으로 실행되는 스크립트 작성<br> ☐ 🤗 Transformers에 모델 스켈레톤 성공적으로 추가<br> ☐ 원본 체크포인트를 🤗 Transformers 체크포인트로 성공적으로 변환<br> ☐ 🤗 Transformers에서 원본 체크포인트와 동일한 출력을 내주는 `forward()` pass 성공적으로 실행<br> ☐ 🤗 Transformers에서 모델 테스트 완료<br> ☐ 🤗 Transformers에 토크나이저 성공적으로 추가<br> ☐ 종단 간 통합 테스트 실행<br> ☐ 문서 작성 완료<br> ☐ 모델 가중치를 허브에 업로드<br> ☐ Pull request 제출<br> ☐ (선택 사항) 데모 노트북 추가 우선, 일반적으로는 `BrandNewBert`의 이론적인 이해로 시작하는 것을 권장합니다. 그러나 이론적 측면을 직접 이해하는 대신 *직접 해보면서* 모델의 이론적 측면을 이해하는 것을 선호하는 경우 바로 `BrandNewBert` 코드 베이스로 빠져드는 것도 괜찮습니다. 이 옵션은 엔지니어링 기술이 이론적 기술보다 더 뛰어난 경우, `BrandNewBert`의 논문을 이해하는 데 어려움이 있는 경우, 또는 과학적인 논문을 읽는 것보다 프로그래밍에 훨씬 더 흥미 있는 경우에 더 적합할 수 있습니다. ### 1. (선택 사항) BrandNewBert의 이론적 측면 [[1-optional-theoretical-aspects-of-brandnewbert]] 만약 그런 서술적인 작업이 존재한다면, *BrandNewBert*의 논문을 읽어보는 시간을 가져야 합니다. 이해하기 어려운 섹션이 많을 수 있습니다. 그렇더라도 걱정하지 마세요! 
목표는 논문의 깊은 이론적 이해가 아니라 *BrandNewBert*를 🤗 Transformers에서 효과적으로 재구현하기 위해 필요한 정보를 추출하는 것입니다. 이를 위해 이론적 측면에 너무 많은 시간을 투자할 필요는 없지만 다음과 같은 실제적인 측면에 집중해야 합니다: - *BrandNewBert*는 어떤 유형의 모델인가요? BERT와 유사한 인코더 모델인가요? GPT2와 유사한 디코더 모델인가요? BART와 유사한 인코더-디코더 모델인가요? 이들 간의 차이점에 익숙하지 않은 경우[model_summary](model_summary)를 참조하세요. - *BrandNewBert*의 응용 분야는 무엇인가요? 텍스트 분류인가요? 텍스트 생성인가요? 요약과 같은 Seq2Seq 작업인가요? - *brand_new_bert*와 BERT/GPT-2/BART의 차이점은 무엇인가요? - *brand_new_bert*와 가장 유사한 [🤗 Transformers 모델](https://huggingface.co/transformers/#contents)은 무엇인가요? - 어떤 종류의 토크나이저가 사용되나요? Sentencepiece 토크나이저인가요? Word piece 토크나이저인가요? BERT 또는 BART에 사용되는 동일한 토크나이저인가요? 모델의 아키텍처에 대해 충분히 이해했다는 생각이 든 후, 궁금한 사항이 있으면 Hugging Face 팀에 문의하십시오. 이는 모델의 아키텍처, 어텐션 레이어 등에 관한 질문을 포함할 수 있습니다. Hugging Face의 유지 관리자들은 보통 코드를 검토하는 것에 대해 매우 기뻐하므로 당신을 돕는 일을 매우 환영할 것입니다! ### 2. 개발 환경 설정 [[2-next-prepare-your-environment]] 1. 저장소 페이지에서 "Fork" 버튼을 클릭하여 저장소의 사본을 GitHub 사용자 계정으로 만듭니다. 2. `transformers` fork를 로컬 디스크에 클론하고 베이스 저장소를 원격 저장소로 추가합니다: ```bash git clone https://github.com/[your Github handle]/transformers.git cd transformers git remote add upstream https://github.com/huggingface/transformers.git ``` 3. 개발 환경을 설정합니다. 다음 명령을 실행하여 개발 환경을 설정할 수 있습니다: ```bash python -m venv .env source .env/bin/activate pip install -e ".[dev]" ``` 각 운영 체제에 따라 Transformers의 선택적 의존성이 개수가 증가하면 이 명령이 실패할 수 있습니다. 그런 경우에는 작업 중인 딥 러닝 프레임워크 (PyTorch, TensorFlow 및/또는 Flax)을 설치한 후, 다음 명령을 수행하면 됩니다: ```bash pip install -e ".[quality]" ``` 대부분의 경우에는 이것으로 충분합니다. 그런 다음 상위 디렉토리로 돌아갑니다. ```bash cd .. ``` 4. Transformers에 *brand_new_bert*의 PyTorch 버전을 추가하는 것을 권장합니다. PyTorch를 설치하려면 다음 링크의 지침을 따르십시오: https://pytorch.org/get-started/locally/. **참고:** CUDA를 설치할 필요는 없습니다. 새로운 모델이 CPU에서 작동하도록 만드는 것으로 충분합니다. 5. *brand_new_bert*를 이식하기 위해서는 해당 원본 저장소에 접근할 수 있어야 합니다: ```bash git clone https://github.com/org_that_created_brand_new_bert_org/brand_new_bert.git cd brand_new_bert pip install -e . ``` 이제 *brand_new_bert*를 🤗 Transformers로 이식하기 위한 개발 환경을 설정하였습니다. ### 3.-4. 원본 저장소에서 사전 훈련된 체크포인트 실행하기 [[3.-4.-run-a-pretrained-checkpoint-using-the-original-repository]] 먼저, 원본 *brand_new_bert* 저장소에서 작업을 시작합니다. 원본 구현은 보통 "연구용"으로 많이 사용됩니다. 즉, 문서화가 부족하고 코드가 이해하기 어려울 수 있습니다. 그러나 이것이 바로 *brand_new_bert*를 다시 구현하려는 동기가 되어야 합니다. Hugging Face에서의 주요 목표 중 하나는 **거인의 어깨 위에 서는 것**이며, 이는 여기에서 쉽게 해석되어 동작하는 모델을 가져와서 가능한 한 **접근 가능하고 사용자 친화적이며 아름답게** 만드는 것입니다. 이것은 🤗 Transformers에서 모델을 다시 구현하는 가장 중요한 동기입니다 - 새로운 복잡한 NLP 기술을 **모두에게** 접근 가능하게 만드는 것을 목표로 합니다. 따라서 원본 저장소에 대해 자세히 살펴보는 것으로 시작해야 합니다. 원본 저장소에서 공식 사전 훈련된 모델을 성공적으로 실행하는 것은 종종 **가장 어려운** 단계입니다. 우리의 경험에 따르면, 원본 코드 베이스에 익숙해지는 데 시간을 투자하는 것이 매우 중요합니다. 다음을 파악해야 합니다: - 사전 훈련된 가중치를 어디서 찾을 수 있는지? - 사전 훈련된 가중치를 해당 모델에로드하는 방법은? - 모델과 독립적으로 토크나이저를 실행하는 방법은? - 간단한 forward pass에 필요한 클래스와 함수를 파악하기 위해 forward pass를 한 번 추적해 보세요. 일반적으로 해당 함수들만 다시 구현하면 됩니다. - 모델의 중요한 구성 요소를 찾을 수 있어야 합니다. 모델 클래스는 어디에 있나요? 모델 하위 클래스(*EncoderModel*, *DecoderModel* 등)가 있나요? self-attention 레이어는 어디에 있나요? self-attention, cross-attention 등 여러 가지 다른 어텐션 레이어가 있나요? - 원본 환경에서 모델을 디버그할 수 있는 방법은 무엇인가요? *print* 문을 추가해야 하나요? *ipdb*와 같은 대화식 디버거를 사용할 수 있나요? PyCharm과 같은 효율적인 IDE를 사용해 모델을 디버그할 수 있나요? 원본 저장소에서 코드를 이식하는 작업을 시작하기 전에 원본 저장소에서 코드를 **효율적으로** 디버그할 수 있어야 합니다! 또한, 오픈 소스 라이브러리로 작업하고 있다는 것을 기억해야 합니다. 따라서 원본 저장소에서 issue를 열거나 pull request를 열기를 주저하지 마십시오. 이 저장소의 유지 관리자들은 누군가가 자신들의 코드를 살펴본다는 것에 대해 매우 기뻐할 것입니다! 현재 시점에서, 원래 모델을 디버깅하기 위해 어떤 디버깅 환경과 전략을 선호하는지는 당신에게 달렸습니다. 우리는 고가의 GPU 환경을 구축하는 것은 비추천합니다. 대신, 원래 저장소로 들어가서 작업을 시작할 때와 🤗 Transformers 모델의 구현을 시작할 때에도 CPU에서 작업하는 것이 좋습니다. 
모델이 이미 🤗 Transformers로 성공적으로 이식되었을 때에만 모델이 GPU에서도 예상대로 작동하는지 확인해야합니다. 일반적으로, 원래 모델을 실행하기 위한 두 가지 가능한 디버깅 환경이 있습니다. - [Jupyter 노트북](https://jupyter.org/) / [Google Colab](https://colab.research.google.com/notebooks/intro.ipynb) - 로컬 Python 스크립트 Jupyter 노트북의 장점은 셀 단위로 실행할 수 있다는 것입니다. 이는 논리적인 구성 요소를 더 잘 분리하고 중간 결과를 저장할 수 있으므로 디버깅 사이클이 더 빨라질 수 있습니다. 또한, 노트북은 다른 기여자와 쉽게 공유할 수 있으므로 Hugging Face 팀의 도움을 요청하려는 경우 매우 유용할 수 있습니다. Jupyter 노트북에 익숙하다면 이를 사용하는 것을 강력히 추천합니다. Jupyter 노트북의 단점은 사용에 익숙하지 않은 경우 새로운 프로그래밍 환경에 적응하는 데 시간을 할애해야 하며, `ipdb`와 같은 알려진 디버깅 도구를 더 이상 사용할 수 없을 수도 있다는 것입니다. 각 코드 베이스에 대해 좋은 첫 번째 단계는 항상 **작은** 사전 훈련된 체크포인트를 로드하고 더미 정수 벡터 입력을 사용하여 단일 forward pass를 재현하는 것입니다. 이와 같은 스크립트는 다음과 같을 수 있습니다(의사 코드로 작성): ```python model = BrandNewBertModel.load_pretrained_checkpoint("/path/to/checkpoint/") input_ids = [0, 4, 5, 2, 3, 7, 9] # vector of input ids original_output = model.predict(input_ids) ``` 다음으로, 디버깅 전략에 대해 일반적으로 다음과 같은 몇 가지 선택지가 있습니다: - 원본 모델을 많은 작은 테스트 가능한 구성 요소로 분해하고 각각에 대해 forward pass를 실행하여 검증합니다. - 원본 모델을 원본 *tokenizer*과 원본 *model*로만 분해하고 해당 부분에 대해 forward pass를 실행한 후 검증을 위해 중간 출력(print 문 또는 중단점)을 사용합니다. 다시 말하지만, 어떤 전략을 선택할지는 당신에게 달려 있습니다. 원본 코드 베이스에 따라 하나 또는 다른 전략이 유리할 수 있습니다. 원본 코드 베이스를 모델의 작은 하위 구성 요소로 분해할 수 있는지 여부, 예를 들어 원본 코드 베이스가 즉시 실행 모드에서 간단히 실행될 수 있는 경우, 그런 경우에는 그 노력이 가치가 있다는 것이 일반적입니다. 초기에 더 어려운 방법을 선택하는 것에는 몇 가지 중요한 장점이 있습니다. - 원본 모델을 🤗 Transformers 구현과 비교할 때 각 구성 요소가 일치하는지 자동으로 확인할 수 있습니다. 즉, 시각적인 비교(print 문을 통한 비교가 아닌) 대신 🤗 Transformers 구현과 그에 대응하는 원본 구성 요소가 일치하는지 확인할 수 있습니다. - 전체 모델을 모듈별로, 즉 작은 구성 요소로 분해함으로써 모델을 이식하는 큰 문제를 단순히 개별 구성 요소를 이식하는 작은 문제로 분해할 수 있으므로 작업을 더 잘 구조화할 수 있습니다. - 모델을 논리적으로 의미 있는 구성 요소로 분리하는 것은 모델의 설계에 대한 더 나은 개요를 얻고 모델을 더 잘 이해하는 데 도움이 됩니다. - 이러한 구성 요소별 테스트를 통해 코드를 변경하면서 회귀가 발생하지 않도록 보장할 수 있습니다. [Lysandre의 ELECTRA 통합 검사](https://gist.github.com/LysandreJik/db4c948f6b4483960de5cbac598ad4ed)는 이를 수행하는 좋은 예제입니다. 그러나 원본 코드 베이스가 매우 복잡하거나 중간 구성 요소를 컴파일된 모드에서 실행하는 것만 허용하는 경우, 모델을 테스트 가능한 작은 하위 구성 요소로 분해하는 것이 시간이 많이 소요되거나 불가능할 수도 있습니다. [T5의 MeshTensorFlow](https://github.com/tensorflow/mesh/tree/master/mesh_tensorflow) 라이브러리는 매우 복잡하며 모델을 하위 구성 요소로 분해하는 간단한 방법을 제공하지 않습니다. 이러한 라이브러리의 경우, 보통 print 문을 통해 확인합니다. 어떤 전략을 선택하더라도 권장되는 절차는 동일합니다. 먼저 시작 레이어를 디버그하고 마지막 레이어를 마지막에 디버그하는 것이 좋습니다. 다음 순서로 각 레이어의 출력을 검색하는 것이 좋습니다: 1. 모델에 전달된 입력 ID 가져오기 2. 워드 임베딩 가져오기 3. 첫 번째 Transformer 레이어의 입력 가져오기 4. 첫 번째 Transformer 레이어의 출력 가져오기 5. 다음 n-1개의 Transformer 레이어의 출력 가져오기 6. BrandNewBert 모델의 출력 가져오기 입력 ID는 정수 배열로 구성되며, 예를 들어 `input_ids = [0, 4, 4, 3, 2, 4, 1, 7, 19]`와 같을 수 있습니다. 다음 레이어의 출력은 종종 다차원 실수 배열로 구성되며, 다음과 같이 나타낼 수 있습니다: ``` [[ [-0.1465, -0.6501, 0.1993, ..., 0.1451, 0.3430, 0.6024], [-0.4417, -0.5920, 0.3450, ..., -0.3062, 0.6182, 0.7132], [-0.5009, -0.7122, 0.4548, ..., -0.3662, 0.6091, 0.7648], ..., [-0.5613, -0.6332, 0.4324, ..., -0.3792, 0.7372, 0.9288], [-0.5416, -0.6345, 0.4180, ..., -0.3564, 0.6992, 0.9191], [-0.5334, -0.6403, 0.4271, ..., -0.3339, 0.6533, 0.8694]]], ``` 🤗 Transformers에 추가되는 모든 모델은 통합 테스트를 통과해야 합니다. 즉, 원본 모델과 🤗 Transformers의 재구현 버전이 0.001의 정밀도로 정확히 동일한 출력을 내야 합니다! 동일한 모델이 다른 라이브러리에서 작성되었을 때 라이브러리 프레임워크에 따라 약간 다른 출력을 얻는 것은 정상이므로 1e-3(0.001)의 오차는 허용합니다. 거의 동일한 출력을 내는 것만으로는 충분하지 않으며, 완벽히 일치하는 수준이어야 합니다. 따라서 🤗 Transformers 버전의 중간 출력을 *brand_new_bert*의 원래 구현의 중간 출력과 여러 번 비교해야 합니다. 이 경우 원본 저장소의 **효율적인** 디버깅 환경이 절대적으로 중요합니다. 디버깅 환경을 가능한 한 효율적으로 만드는 몇 가지 조언을 제시합니다. - 중간 결과를 디버그하는 가장 좋은 방법을 찾으세요. 원본 저장소가 PyTorch로 작성되었다면 원본 모델을 더 작은 하위 구성 요소로 분해하여 중간 값을 검색하는 긴 스크립트를 작성하는 것에 시간을 투자할 가치가 있습니다. 
원본 저장소가 Tensorflow 1로 작성되었다면 [tf.print](https://www.tensorflow.org/api_docs/python/tf/print)와 같은 Tensorflow 출력 작업을 사용하여 중간 값을 출력해야 할 수도 있습니다. 원본 저장소가 Jax로 작성되었다면 forward pass를 실행할 때 모델이 **jit 되지 않도록** 해야 합니다. 예를 들어 [이 링크](https://github.com/google/jax/issues/196)를 확인해 보세요. - 사용 가능한 가장 작은 사전 훈련된 체크포인트를 사용하세요. 체크포인트가 작을수록 디버그 사이클이 더 빨라집니다. 전반적으로 forward pass에 10초 이상이 걸리는 경우 효율적이지 않습니다. 매우 큰 체크포인트만 사용할 수 있는 경우, 새 환경에서 임의로 초기화된 가중치로 더미 모델을 만들고 해당 가중치를 🤗 Transformers 버전과 비교하기 위해 저장하는 것이 더 의미가 있을 수 있습니다. - 디버깅 설정에서 가장 쉽게 forward pass를 호출하는 방법을 사용하세요. 원본 저장소에서 **단일** forward pass만 호출하는 함수를 찾는 것이 이상적입니다. 이 함수는 일반적으로 `predict`, `evaluate`, `forward`, `__call__`과 같이 호출됩니다. `autoregressive_sample`과 같은 텍스트 생성에서 `forward`를 여러 번 호출하여 텍스트를 생성하는 등의 작업을 수행하는 함수를 디버그하고 싶지 않을 것입니다. - 토큰화 과정을 모델의 *forward* pass와 분리하려고 노력하세요. 원본 저장소에서 입력 문자열을 입력해야 하는 예제가 있는 경우, 입력 문자열이 입력 ID로 변경되는 순간을 찾아서 시작하세요. 이 경우 직접 ID를 입력할 수 있도록 작은 스크립트를 작성하거나 원본 코드를 수정해야 할 수도 있습니다. - 디버깅 설정에서 모델이 훈련 모드가 아니라는 것을 확인하세요. 훈련 모드에서는 모델의 여러 드롭아웃 레이어 때문에 무작위 출력이 생성될 수 있습니다. 디버깅 환경에서 forward pass가 **결정론적**이도록 해야 합니다. 또는 동일한 프레임워크에 있는 경우 *transformers.utils.set_seed*를 사용하세요. 다음 섹션에서는 *brand_new_bert*에 대해 이 작업을 수행하는 데 더 구체적인 세부 사항/팁을 제공합니다. ### 5.-14. 🤗 Transformers에 BrandNewBert를 이식하기 [[5.-14.-port-brandnewbert-to-transformers]] 이제, 마침내 🤗 Transformers에 새로운 코드를 추가할 수 있습니다. 🤗 Transformers 포크의 클론으로 이동하세요: ```bash cd transformers ``` 다음과 같이 이미 존재하는 모델의 모델 아키텍처와 정확히 일치하는 모델을 추가하는 특별한 경우에는 [이 섹션](#write-a-conversion-script)에 설명된대로 변환 스크립트만 추가하면 됩니다. 이 경우에는 이미 존재하는 모델의 전체 모델 아키텍처를 그대로 재사용할 수 있습니다. 그렇지 않으면 새로운 모델 생성을 시작합시다. 여기에서 두 가지 선택지가 있습니다: - `transformers-cli add-new-model-like`를 사용하여 기존 모델과 유사한 새로운 모델 추가하기 - `transformers-cli add-new-model`을 사용하여 템플릿을 기반으로 한 새로운 모델 추가하기 (선택한 모델 유형에 따라 BERT 또는 Bart와 유사한 모습일 것입니다) 두 경우 모두, 모델의 기본 정보를 입력하는 설문조사가 제시됩니다. 두 번째 명령어는 `cookiecutter`를 설치해야 합니다. 자세한 정보는 [여기](https://github.com/huggingface/transformers/tree/main/templates/adding_a_new_model)에서 확인할 수 있습니다. **huggingface/transformers 메인 저장소에 Pull Request 열기** 자동으로 생성된 코드를 수정하기 전에, 지금은 "작업 진행 중 (WIP)" 풀 리퀘스트를 열기 위한 시기입니다. 예를 들어, 🤗 Transformers에 "*brand_new_bert* 추가"라는 제목의 "[WIP] Add *brand_new_bert*" 풀 리퀘스트를 엽니다. 이렇게 하면 당신과 Hugging Face 팀이 🤗 Transformers에 모델을 통합하는 작업을 함께할 수 있습니다. 다음을 수행해야 합니다: 1. 메인 브랜치에서 작업을 잘 설명하는 이름으로 브랜치 생성 ```bash git checkout -b add_brand_new_bert ``` 2. 자동으로 생성된 코드 커밋 ```bash git add . git commit ``` 3. 현재 메인을 가져오고 리베이스 ```bash git fetch upstream git rebase upstream/main ``` 4. 변경 사항을 계정에 푸시 ```bash git push -u origin a-descriptive-name-for-my-changes ``` 5. 만족스럽다면, GitHub에서 자신의 포크한 웹 페이지로 이동합니다. "Pull request"를 클릭합니다. Hugging Face 팀의 일부 멤버의 GitHub 핸들을 리뷰어로 추가하여 Hugging Face 팀이 앞으로의 변경 사항에 대해 알림을 받을 수 있도록 합니다. 6. GitHub 풀 리퀘스트 웹 페이지 오른쪽에 있는 "Convert to draft"를 클릭하여 PR을 초안으로 변경합니다. 다음으로, 어떤 진전을 이루었다면 작업을 커밋하고 계정에 푸시하여 풀 리퀘스트에 표시되도록 해야 합니다. 또한, 다음과 같이 현재 메인과 작업을 업데이트해야 합니다: ```bash git fetch upstream git merge upstream/main ``` 일반적으로, 모델 또는 구현에 관한 모든 질문은 자신의 PR에서 해야 하며, PR에서 토론되고 해결되어야 합니다. 이렇게 하면 Hugging Face 팀이 새로운 코드를 커밋하거나 질문을 할 때 항상 알림을 받을 수 있습니다. Hugging Face 팀에게 문제 또는 질문을 효율적으로 이해할 수 있도록 추가한 코드를 명시하는 것이 도움이 될 때가 많습니다. 이를 위해, 변경 사항을 모두 볼 수 있는 "Files changed" 탭으로 이동하여 질문하고자 하는 줄로 이동한 다음 "+" 기호를 클릭하여 코멘트를 추가할 수 있습니다. 질문이나 문제가 해결되면, 생성된 코멘트의 "Resolve" 버튼을 클릭할 수 있습니다. 마찬가지로, Hugging Face 팀은 코드를 리뷰할 때 코멘트를 남길 것입니다. 우리는 PR에서 대부분의 질문을 GitHub에서 묻는 것을 권장합니다. 공개에 크게 도움이 되지 않는 매우 일반적인 질문의 경우, Slack이나 이메일을 통해 Hugging Face 팀에게 문의할 수 있습니다. **5. brand_new_bert에 대해 생성된 모델 코드를 적용하기** 먼저, 우리는 모델 자체에만 초점을 맞추고 토크나이저에 대해서는 신경 쓰지 않을 것입니다. 
모든 관련 코드는 다음의 생성된 파일에서 찾을 수 있습니다: `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` 및 `src/transformers/models/brand_new_bert/configuration_brand_new_bert.py`. 이제 마침내 코딩을 시작할 수 있습니다 :). `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py`의 생성된 코드는 인코더 전용 모델인 경우 BERT와 동일한 아키텍처를 가지거나, 인코더-디코더 모델인 경우 BART와 동일한 아키텍처를 가질 것입니다. 이 시점에서, 모델의 이론적 측면에 대해 배운 내용을 다시 상기해야 합니다: *모델이 BERT 또는 BART와 어떻게 다른가요?*. 자주 변경해야 하는 것은 *self-attention* 레이어, 정규화 레이어의 순서 등을 변경하는 것입니다. 다시 말하지만, 자신의 모델을 구현하는 데 도움이 되도록 Transformers에서 이미 존재하는 모델의 유사한 아키텍처를 살펴보는 것이 유용할 수 있습니다. **참고로** 이 시점에서, 코드가 완전히 정확하거나 깨끗하다고 확신할 필요는 없습니다. 오히려 처음에는 원본 코드의 첫 번째 *불완전하고* 복사된 버전을 `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py`에 추가하는 것이 좋습니다. 필요한 모든 코드가 추가될 때까지 이러한 작업을 진행한 후, 다음 섹션에서 설명한 변환 스크립트를 사용하여 코드를 점진적으로 개선하고 수정하는 것이 훨씬 효율적입니다. 이 시점에서 작동해야 하는 유일한 것은 다음 명령이 작동하는 것입니다: ```python from transformers import BrandNewBertModel, BrandNewBertConfig model = BrandNewBertModel(BrandNewBertConfig()) ``` 위의 명령은 `BrandNewBertConfig()`에 정의된 기본 매개변수에 따라 무작위 가중치로 모델을 생성하며, 이로써 모든 구성 요소의 `init()` 메서드가 작동함을 보장합니다. 모든 무작위 초기화는 `BrandnewBertPreTrainedModel` 클래스의 `_init_weights` 메서드에서 수행되어야 합니다. 이 메서드는 구성 설정 변수에 따라 모든 리프 모듈을 초기화해야 합니다. BERT의 `_init_weights` 메서드 예제는 다음과 같습니다: ```py def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) ``` 몇 가지 모듈에 대해 특별한 초기화가 필요한 경우 사용자 정의 방식을 사용할 수도 있습니다. 예를 들어, `Wav2Vec2ForPreTraining`에서 마지막 두 개의 선형 레이어는 일반적인 PyTorch `nn.Linear`의 초기화를 가져야 하지만, 다른 모든 레이어는 위와 같은 초기화를 사용해야 합니다. 이는 다음과 같이 코드화됩니다: ```py def _init_weights(self, module): """Initialize the weights""" if isinstance(module, Wav2Vec2ForPreTraining): module.project_hid.reset_parameters() module.project_q.reset_parameters() module.project_hid._is_hf_initialized = True module.project_q._is_hf_initialized = True elif isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() ``` `_is_hf_initialized` 플래그는 서브모듈을 한 번만 초기화하도록 내부적으로 사용됩니다. `module.project_q` 및 `module.project_hid`에 대해 `True`로 설정함으로써, 우리가 수행한 사용자 정의 초기화가 이후에 덮어쓰이지 않도록 합니다. 즉, `_init_weights` 함수가 이들에게 적용되지 않습니다. **6. 변환 스크립트 작성하기** 다음으로, 디버그에 사용한 체크포인트를 기존 저장소에서 만든 🤗 Transformers 구현과 호환되는 체크포인트로 변환할 수 있는 변환 스크립트를 작성해야 합니다. 변환 스크립트를 처음부터 작성하는 것보다는 *brand_new_bert*와 동일한 프레임워크로 작성된 유사한 모델을 변환한 기존 변환 스크립트를 찾아보는 것이 좋습니다. 일반적으로 기존 변환 스크립트를 복사하여 사용 사례에 맞게 약간 수정하는 것으로 충분합니다. 모델에 대해 유사한 기존 변환 스크립트를 어디에서 찾을 수 있는지 Hugging Face 팀에게 문의하는 것을 망설이지 마세요. - TensorFlow에서 PyTorch로 모델을 이전하는 경우, 좋은 참고 자료로 BERT의 변환 스크립트 [여기](https://github.com/huggingface/transformers/blob/7acfa95afb8194f8f9c1f4d2c6028224dbed35a2/src/transformers/models/bert/modeling_bert.py#L91)를 참조할 수 있습니다. - PyTorch에서 PyTorch로 모델을 이전하는 경우, 좋은 참고 자료로 BART의 변환 스크립트 [여기](https://github.com/huggingface/transformers/blob/main/src/transformers/models/bart/convert_bart_original_pytorch_checkpoint_to_pytorch.py)를 참조할 수 있습니다. 다음에서는 PyTorch 모델이 레이어 가중치를 저장하고 레이어 이름을 정의하는 방법에 대해 간단히 설명하겠습니다. PyTorch에서 레이어의 이름은 레이어에 지정한 클래스 속성의 이름으로 정의됩니다. 
다음과 같이 PyTorch에서 `SimpleModel`이라는 더미 모델을 정의해 봅시다: ```python from torch import nn class SimpleModel(nn.Module): def __init__(self): super().__init__() self.dense = nn.Linear(10, 10) self.intermediate = nn.Linear(10, 10) self.layer_norm = nn.LayerNorm(10) ``` 이제 이 모델 정의의 인스턴스를 생성할 수 있으며 `dense`, `intermediate`, `layer_norm` 등의 가중치가 랜덤하게 할당됩니다. 모델을 출력하여 아키텍처를 확인할 수 있습니다. ```python model = SimpleModel() print(model) ``` 이는 다음과 같이 출력됩니다: ``` SimpleModel( (dense): Linear(in_features=10, out_features=10, bias=True) (intermediate): Linear(in_features=10, out_features=10, bias=True) (layer_norm): LayerNorm((10,), eps=1e-05, elementwise_affine=True) ) ``` 우리는 레이어의 이름이 PyTorch에서 클래스 속성의 이름으로 정의되어 있는 것을 볼 수 있습니다. 특정 레이어의 가중치 값을 출력하여 확인할 수 있습니다: ```python print(model.dense.weight.data) ``` 가중치가 무작위로 초기화되었음을 확인할 수 있습니다. ``` tensor([[-0.0818, 0.2207, -0.0749, -0.0030, 0.0045, -0.1569, -0.1598, 0.0212, -0.2077, 0.2157], [ 0.1044, 0.0201, 0.0990, 0.2482, 0.3116, 0.2509, 0.2866, -0.2190, 0.2166, -0.0212], [-0.2000, 0.1107, -0.1999, -0.3119, 0.1559, 0.0993, 0.1776, -0.1950, -0.1023, -0.0447], [-0.0888, -0.1092, 0.2281, 0.0336, 0.1817, -0.0115, 0.2096, 0.1415, -0.1876, -0.2467], [ 0.2208, -0.2352, -0.1426, -0.2636, -0.2889, -0.2061, -0.2849, -0.0465, 0.2577, 0.0402], [ 0.1502, 0.2465, 0.2566, 0.0693, 0.2352, -0.0530, 0.1859, -0.0604, 0.2132, 0.1680], [ 0.1733, -0.2407, -0.1721, 0.1484, 0.0358, -0.0633, -0.0721, -0.0090, 0.2707, -0.2509], [-0.1173, 0.1561, 0.2945, 0.0595, -0.1996, 0.2988, -0.0802, 0.0407, 0.1829, -0.1568], [-0.1164, -0.2228, -0.0403, 0.0428, 0.1339, 0.0047, 0.1967, 0.2923, 0.0333, -0.0536], [-0.1492, -0.1616, 0.1057, 0.1950, -0.2807, -0.2710, -0.1586, 0.0739, 0.2220, 0.2358]]). ``` 변환 스크립트에서는 이러한 무작위로 초기화된 가중치를 체크포인트의 해당 레이어의 정확한 가중치로 채워야 합니다. 예를 들면 다음과 같습니다: ```python # retrieve matching layer weights, e.g. by # recursive algorithm layer_name = "dense" pretrained_weight = array_of_dense_layer model_pointer = getattr(model, "dense") model_pointer.weight.data = torch.from_numpy(pretrained_weight) ``` 이렇게 하면 PyTorch 모델의 무작위로 초기화된 각 가중치와 해당 체크포인트 가중치가 **모양과 이름** 모두에서 정확히 일치하는지 확인해야 합니다. 이를 위해 모양에 대한 assert 문을 추가하고 체크포인트 가중치의 이름을 출력해야 합니다. 예를 들어 다음과 같은 문장을 추가해야 합니다: ```python assert ( model_pointer.weight.shape == pretrained_weight.shape ), f"Pointer shape of random weight {model_pointer.shape} and array shape of checkpoint weight {pretrained_weight.shape} mismatched" ``` 또한 두 가중치의 이름을 출력하여 일치하는지 확인해야 합니다. *예시*: ```python logger.info(f"Initialize PyTorch weight {layer_name} from {pretrained_weight.name}") ``` 모양 또는 이름이 일치하지 않는 경우, 랜덤으로 초기화된 레이어에 잘못된 체크포인트 가중치를 할당한 것으로 추측됩니다. 잘못된 모양은 `BrandNewBertConfig()`의 구성 매개변수 설정이 변환하려는 체크포인트에 사용된 설정과 정확히 일치하지 않기 때문일 가능성이 가장 큽니다. 그러나 PyTorch의 레이어 구현 자체에서 가중치를 전치해야 할 수도 있습니다. 마지막으로, **모든** 필요한 가중치가 초기화되었는지 확인하고 초기화에 사용되지 않은 모든 체크포인트 가중치를 출력하여 모델이 올바르게 변환되었는지 확인해야 합니다. 잘못된 모양 문장이나 잘못된 이름 할당으로 인해 변환 시도가 실패하는 것은 완전히 정상입니다. 이는 `BrandNewBertConfig()`에서 잘못된 매개변수를 사용하거나 🤗 Transformers 구현에서 잘못된 아키텍처, 🤗 Transformers 구현의 구성 요소 중 하나의 `init()` 함수에 버그가 있는 경우이거나 체크포인트 가중치 중 하나를 전치해야 하는 경우일 가능성이 가장 높습니다. 이 단계는 이전 단계와 함께 반복되어야 하며 모든 체크포인트의 가중치가 Transformers 모델에 올바르게 로드되었을 때까지 계속되어야 합니다. 🤗 Transformers 구현에 체크포인트를 올바르게 로드한 후에는 `/path/to/converted/checkpoint/folder`와 같은 원하는 폴더에 모델을 저장할 수 있어야 합니다. 해당 폴더에는 `pytorch_model.bin` 파일과 `config.json` 파일이 모두 포함되어야 합니다. ```python model.save_pretrained("/path/to/converted/checkpoint/folder") ``` **7. 순방향 패스 구현하기** 🤗 Transformers 구현에 사전 훈련된 가중치를 정확하게 로드한 후에는 순방향 패스가 올바르게 구현되었는지 확인해야 합니다. 
[원본 저장소에 익숙해지기](#3-4-run-a-pretrained-checkpoint-using-the-original-repository)에서 이미 원본 저장소를 사용하여 모델의 순방향 패스를 실행하는 스크립트를 만들었습니다. 이제 원본 대신 🤗 Transformers 구현을 사용하는 유사한 스크립트를 작성해야 합니다. 다음과 같이 작성되어야 합니다: ```python model = BrandNewBertModel.from_pretrained("/path/to/converted/checkpoint/folder") input_ids = [0, 4, 4, 3, 2, 4, 1, 7, 19] output = model(input_ids).last_hidden_states ``` 🤗 Transformers 구현과 원본 모델 구현이 처음부터 정확히 동일한 출력을 제공하지 않거나 순방향 패스에서 오류가 발생할 가능성이 매우 높습니다. 실망하지 마세요. 예상된 일입니다! 먼저, 순방향 패스에서 오류가 발생하지 않도록 해야 합니다. 종종 잘못된 차원이 사용되어 *차원 불일치* 오류가 발생하거나 잘못된 데이터 유형 개체가 사용되는 경우가 있습니다. 예를 들면 `torch.long` 대신에 `torch.float32`가 사용된 경우입니다. 해결할 수 없는 오류가 발생하면 Hugging Face 팀에 도움을 요청하는 것이 좋습니다. 🤗 Transformers 구현이 올바르게 작동하는지 확인하는 마지막 단계는 출력이 `1e-3`의 정밀도로 동일한지 확인하는 것입니다. 먼저, 출력 모양이 동일하도록 보장해야 합니다. 즉, 🤗 Transformers 구현 스크립트와 원본 구현 사이에서 `outputs.shape`는 동일한 값을 반환해야 합니다. 그 다음으로, 출력 값이 동일하도록 해야 합니다. 이는 새로운 모델을 추가할 때 가장 어려운 부분 중 하나입니다. 출력이 동일하지 않은 일반적인 실수 사례는 다음과 같습니다: - 일부 레이어가 추가되지 않았습니다. 즉, *활성화* 레이어가 추가되지 않았거나 잔차 연결이 빠졌습니다. - 단어 임베딩 행렬이 연결되지 않았습니다. - 잘못된 위치 임베딩이 사용되었습니다. 원본 구현에서는 오프셋을 사용합니다. - 순방향 패스 중에 Dropout이 적용되었습니다. 이를 수정하려면 *model.training이 False*인지 확인하고 순방향 패스 중에 Dropout 레이어가 잘못 활성화되지 않도록 하세요. 즉, [PyTorch의 기능적 Dropout](https://pytorch.org/docs/stable/nn.functional.html?highlight=dropout#torch.nn.functional.dropout)에 *self.training*을 전달하세요. 문제를 해결하는 가장 좋은 방법은 일반적으로 원본 구현과 🤗 Transformers 구현의 순방향 패스를 나란히 놓고 차이점이 있는지 확인하는 것입니다. 이상적으로는 순방향 패스의 중간 출력을 디버그/출력하여 원본 구현과 🤗 Transformers 구현의 정확한 위치를 찾을 수 있어야 합니다. 먼저, 두 스크립트의 하드코딩된 `input_ids`가 동일한지 확인하세요. 다음으로, `input_ids`의 첫 번째 변환의 출력(일반적으로 단어 임베딩)이 동일한지 확인하세요. 그런 다음 네트워크의 가장 마지막 레이어까지 진행해보세요. 어느 시점에서 두 구현 사이에 차이가 있는 것을 알게 되는데, 이는 🤗 Transformers 구현의 버그 위치를 가리킬 것입니다. 저희 경험상으로는 원본 구현과 🤗 Transformers 구현 모두에서 동일한 위치에 많은 출력 문을 추가하고 이들의 중간 표현에 대해 동일한 값을 보이는 출력 문을 연속적으로 제거하는 것이 간단하고 효과적인 방법입니다. `torch.allclose(original_output, output, atol=1e-3)`로 출력을 확인하여 두 구현이 동일한 출력을 하는 것을 확신한다면, 가장 어려운 부분은 끝났습니다! 축하드립니다. 남은 작업은 쉬운 일이 될 것입니다 😊. **8. 필요한 모든 모델 테스트 추가하기** 이 시점에서 새로운 모델을 성공적으로 추가했습니다. 그러나 해당 모델이 요구되는 디자인에 완전히 부합하지 않을 수도 있습니다. 🤗 Transformers와 완벽하게 호환되는 구현인지 확인하기 위해 모든 일반 테스트를 통과해야 합니다. Cookiecutter는 아마도 모델을 위한 테스트 파일을 자동으로 추가했을 것입니다. 아마도 `tests/models/brand_new_bert/test_modeling_brand_new_bert.py`와 같은 경로에 위치할 것입니다. 이 테스트 파일을 실행하여 일반 테스트가 모두 통과하는지 확인하세요. ```bash pytest tests/models/brand_new_bert/test_modeling_brand_new_bert.py ``` 모든 일반 테스트를 수정한 후, 이제 수행한 작업을 충분히 테스트하여 다음 사항을 보장해야 합니다. - a) 커뮤니티가 *brand_new_bert*의 특정 테스트를 살펴봄으로써 작업을 쉽게 이해할 수 있도록 함 - b) 모델에 대한 향후 변경 사항이 모델의 중요한 기능을 손상시키지 않도록 함 먼저 통합 테스트를 추가해야 합니다. 이러한 통합 테스트는 이전에 모델을 🤗 Transformers로 구현하기 위해 사용한 디버깅 스크립트와 동일한 작업을 수행합니다. Cookiecutter에 이미 이러한 모델 테스트의 템플릿인 `BrandNewBertModelIntegrationTests`가 추가되어 있으며, 여러분이 작성해야 할 내용으로만 채워 넣으면 됩니다. 이러한 테스트가 통과하는지 확인하려면 다음을 실행하세요. ```bash RUN_SLOW=1 pytest -sv tests/models/brand_new_bert/test_modeling_brand_new_bert.py::BrandNewBertModelIntegrationTests ``` <Tip> Windows를 사용하는 경우 `RUN_SLOW=1`을 `SET RUN_SLOW=1`로 바꿔야 합니다. </Tip> 둘째로, *brand_new_bert*에 특화된 모든 기능도 별도의 테스트에서 추가로 테스트해야 합니다. 이 부분은 종종 잊히는데, 두 가지 측면에서 굉장히 유용합니다. - *brand_new_bert*의 특수 기능이 어떻게 작동해야 하는지 보여줌으로써 커뮤니티에게 모델 추가 과정에서 습득한 지식을 전달하는 데 도움이 됩니다. - 향후 기여자는 이러한 특수 테스트를 실행하여 모델에 대한 변경 사항을 빠르게 테스트할 수 있습니다. **9. 토크나이저 구현하기** 다음으로, *brand_new_bert*의 토크나이저를 추가해야 합니다. 보통 토크나이저는 🤗 Transformers의 기존 토크나이저와 동일하거나 매우 유사합니다. 토크나이저가 올바르게 작동하는지 확인하기 위해 먼저 원본 리포지토리에서 문자열을 입력하고 `input_ids`를 반환하는 스크립트를 생성하는 것이 좋습니다. 
다음과 같은 유사한 스크립트일 수 있습니다 (의사 코드로 작성): ```python input_str = "This is a long example input string containing special characters .$?-, numbers 2872 234 12 and words." model = BrandNewBertModel.load_pretrained_checkpoint("/path/to/checkpoint/") input_ids = model.tokenize(input_str) ``` 원본 리포지토리를 자세히 살펴보고 올바른 토크나이저 함수를 찾거나, 복제본에서 변경 사항을 적용하여 `input_ids`만 출력하도록 해야 합니다. 원본 리포지토리를 사용하는 기능적인 토큰화 스크립트를 작성한 후, 🤗 Transformers의 유사한 스크립트를 생성해야 합니다. 다음과 같이 작성되어야 합니다: ```python from transformers import BrandNewBertTokenizer input_str = "This is a long example input string containing special characters .$?-, numbers 2872 234 12 and words." tokenizer = BrandNewBertTokenizer.from_pretrained("/path/to/tokenizer/folder/") input_ids = tokenizer(input_str).input_ids ``` 두 개의 `input_ids`가 동일한 값을 반환할 때, 마지막 단계로 토크나이저 테스트 파일도 추가해야 합니다. *brand_new_bert*의 모델링 테스트 파일과 유사하게, *brand_new_bert*의 토크나이제이션 테스트 파일에는 몇 가지 하드코딩된 통합 테스트가 포함되어야 합니다. **10. 종단 간 통합 테스트 실행** 토크나이저를 추가한 후에는 모델과 토크나이저를 사용하여 몇 가지 종단 간 통합 테스트를 추가해야 합니다. `tests/models/brand_new_bert/test_modeling_brand_new_bert.py`에 추가해주세요. 이러한 테스트는 🤗 Transformers 구현이 예상대로 작동하는지를 의미 있는 text-to-text 예시로 보여줘야 합니다. 그 예시로는 *예를 들어* source-to-target 번역 쌍, article-to-summary 쌍, question-to-answer 쌍 등이 포함될 수 있습니다. 불러온 체크포인트 중 어느 것도 다운스트림 작업에서 미세 조정되지 않았다면, 모델 테스트만으로 충분합니다. 모델이 완전히 기능을 갖추었는지 확인하기 위해 마지막 단계로 GPU에서 모든 테스트를 실행하는 것이 좋습니다. 모델의 내부 텐서의 일부에 `.to(self.device)` 문을 추가하는 것을 잊었을 수 있으며, 이 경우 테스트에서 오류로 표시됩니다. GPU에 액세스할 수 없는 경우, Hugging Face 팀이 테스트를 대신 실행할 수 있습니다. **11. 기술문서 추가** 이제 *brand_new_bert*에 필요한 모든 기능이 추가되었습니다. 거의 끝났습니다! 추가해야 할 것은 멋진 기술문서과 기술문서 페이지입니다. Cookiecutter가 `docs/source/model_doc/brand_new_bert.md`라는 템플릿 파일을 추가해줬을 것입니다. 이 페이지를 사용하기 전에 모델을 사용하는 사용자들은 일반적으로 이 페이지를 먼저 확인합니다. 따라서 문서는 이해하기 쉽고 간결해야 합니다. 모델을 사용하는 방법을 보여주기 위해 *팁*을 추가하는 것이 커뮤니티에 매우 유용합니다. 독스트링에 관련하여 Hugging Face 팀에 문의하는 것을 주저하지 마세요. 다음으로, `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py`에 추가된 독스트링이 올바르며 필요한 모든 입력 및 출력을 포함하도록 확인하세요. [여기](writing-documentation)에서 우리의 문서 작성 가이드와 독스트링 형식에 대한 상세 가이드가 있습니다. 문서는 일반적으로 커뮤니티와 모델의 첫 번째 접점이기 때문에, 문서는 적어도 코드만큼의 주의를 기울여야 합니다. **코드 리팩토링** 좋아요, 이제 *brand_new_bert*를 위한 모든 필요한 코드를 추가했습니다. 이 시점에서 다음을 실행하여 잠재적으로 잘못된 코드 스타일을 수정해야 합니다: 그리고 코딩 스타일이 품질 점검을 통과하는지 확인하기 위해 다음을 실행하고 확인해야 합니다: ```bash make style ``` 🤗 Transformers에는 여전히 실패할 수 있는 몇 가지 매우 엄격한 디자인 테스트가 있습니다. 이는 독스트링에 누락된 정보나 잘못된 명명 때문에 종종 발생합니다. 여기서 막히면 Hugging Face 팀이 도움을 줄 것입니다. ```bash make quality ``` 마지막으로, 코드가 정확히 작동하는 것을 확인한 후에는 항상 코드를 리팩토링하는 것이 좋은 생각입니다. 모든 테스트가 통과된 지금은 추가한 코드를 다시 검토하고 리팩토링하는 좋은 시기입니다. 이제 코딩 부분을 완료했습니다. 축하합니다! 🎉 멋져요! 😎 **12. 모델을 모델 허브에 업로드하세요** 이 마지막 파트에서는 모든 체크포인트를 변환하여 모델 허브에 업로드하고 각 업로드된 모델 체크포인트에 대한 모델 카드를 추가해야 합니다. [Model sharing and uploading Page](model_sharing)를 읽고 허브 기능에 익숙해지세요. *brand_new_bert*의 저자 조직 아래에 모델을 업로드할 수 있는 필요한 액세스 권한을 얻기 위해 Hugging Face 팀과 협업해야 합니다. `transformers`의 모든 모델에 있는 `push_to_hub` 메서드는 체크포인트를 허브에 빠르고 효율적으로 업로드하는 방법입니다. 아래에 작은 코드 조각이 붙여져 있습니다: 각 체크포인트에 적합한 모델 카드를 만드는 데 시간을 할애하는 것은 가치가 있습니다. 모델 카드는 체크포인트의 특성을 강조해야 합니다. *예를 들어* 이 체크포인트는 어떤 데이터셋에서 사전 훈련/세부 훈련되었는지? 이 모델은 어떤 하위 작업에서 사용해야 하는지? 그리고 모델을 올바르게 사용하는 방법에 대한 몇 가지 코드도 포함해야 합니다. ```python brand_new_bert.push_to_hub("brand_new_bert") # Uncomment the following line to push to an organization. # brand_new_bert.push_to_hub("<organization>/brand_new_bert") ``` **13. (선택 사항) 노트북 추가** *brand_new_bert*를 다운스트림 작업에서 추론 또는 미세 조정에 사용하는 방법을 자세히 보여주는 노트북을 추가하는 것이 매우 유용합니다. 이것은 PR을 병합하는 데 필수적이지는 않지만 커뮤니티에 매우 유용합니다. **14. 완료된 PR 제출** 이제 프로그래밍을 마쳤으며, 마지막 단계로 PR을 메인 브랜치에 병합해야 합니다. 
보통 Hugging Face 팀은 이미 여기까지 도움을 주었을 것입니다. 그러나 PR에 멋진 설명을 추가하고 리뷰어에게 특정 디자인 선택 사항을 강조하려면 완료된 PR에 약간의 설명을 추가하는 시간을 할애하는 것이 가치가 있습니다. ### 작업물을 공유하세요!! [[share-your-work]] 이제 커뮤니티에서 작업물을 인정받을 시간입니다! 모델 추가 작업을 완료하는 것은 Transformers와 전체 NLP 커뮤니티에 큰 기여입니다. 당신의 코드와 이식된 사전 훈련된 모델은 수백, 심지어 수천 명의 개발자와 연구원에 의해 확실히 사용될 것입니다. 당신의 작업에 자랑스러워해야 하며 이를 커뮤니티와 공유해야 합니다. **당신은 커뮤니티 내 모든 사람들에게 매우 쉽게 접근 가능한 또 다른 모델을 만들었습니다! 🤯**
transformers/docs/source/ko/add_new_model.md/0
{ "file_path": "transformers/docs/source/ko/add_new_model.md", "repo_id": "transformers", "token_count": 43462 }
283
<!--⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 열심히 번역 중입니다. 조금 이따 만나요!
transformers/docs/source/ko/in_translation.md/0
{ "file_path": "transformers/docs/source/ko/in_translation.md", "repo_id": "transformers", "token_count": 95 }
284
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 단일 GPU에서 효율적인 추론 [[efficient-inference-on-a-single-gpu]] 이 가이드 외에도, [단일 GPU에서의 훈련 가이드](perf_train_gpu_one)와 [CPU에서의 추론 가이드](perf_infer_cpu)에서도 관련 정보를 찾을 수 있습니다. ## Better Transformer: PyTorch 네이티브 Transformer 패스트패스 [[better-transformer-pytorchnative-transformer-fastpath]] PyTorch 네이티브 [`nn.MultiHeadAttention`](https://pytorch.org/blog/a-better-transformer-for-fast-transformer-encoder-inference/) 어텐션 패스트패스인 BetterTransformer는 [🤗 Optimum 라이브러리](https://huggingface.co/docs/optimum/bettertransformer/overview)의 통합을 통해 Transformers와 함께 사용할 수 있습니다. PyTorch의 어텐션 패스트패스는 커널 퓨전과 [중첩된 텐서](https://pytorch.org/docs/stable/nested.html)의 사용을 통해 추론 속도를 높일 수 있습니다. 자세한 벤치마크는 [이 블로그 글](https://medium.com/pytorch/bettertransformer-out-of-the-box-performance-for-huggingface-transformers-3fbe27d50ab2)에서 확인할 수 있습니다. [`optimum`](https://github.com/huggingface/optimum) 패키지를 설치한 후에는 추론 중 Better Transformer를 사용할 수 있도록 [`~PreTrainedModel.to_bettertransformer`]를 호출하여 관련 내부 모듈을 대체합니다: ```python model = model.to_bettertransformer() ``` [`~PreTrainedModel.reverse_bettertransformer`] 메소드는 정규화된 transformers 모델링을 사용하기 위해 모델을 저장하기 전 원래의 모델링으로 돌아갈 수 있도록 해줍니다: ```python model = model.reverse_bettertransformer() model.save_pretrained("saved_model") ``` PyTorch 2.0부터는 어텐션 패스트패스가 인코더와 디코더 모두에서 지원됩니다. 지원되는 아키텍처 목록은 [여기](https://huggingface.co/docs/optimum/bettertransformer/overview#supported-models)에서 확인할 수 있습니다. ## FP4 혼합 정밀도 추론을 위한 `bitsandbytes` 통합 [[bitsandbytes-integration-for-fp4-mixedprecision-inference]] `bitsandbytes`를 설치하면 GPU에서 손쉽게 모델을 압축할 수 있습니다. FP4 양자화를 사용하면 원래의 전체 정밀도 버전과 비교하여 모델 크기를 최대 8배 줄일 수 있습니다. 아래에서 시작하는 방법을 확인하세요. <Tip> 이 기능은 다중 GPU 설정에서도 사용할 수 있습니다. </Tip> ### 요구 사항 [[requirements-for-fp4-mixedprecision-inference]] - 최신 `bitsandbytes` 라이브러리 `pip install bitsandbytes>=0.39.0` - 최신 `accelerate`를 소스에서 설치 `pip install git+https://github.com/huggingface/accelerate.git` - 최신 `transformers`를 소스에서 설치 `pip install git+https://github.com/huggingface/transformers.git` ### FP4 모델 실행 - 단일 GPU 설정 - 빠른 시작 [[running-fp4-models-single-gpu-setup-quickstart]] 다음 코드를 실행하여 단일 GPU에서 빠르게 FP4 모델을 실행할 수 있습니다. ```py from transformers import AutoModelForCausalLM model_name = "bigscience/bloom-2b5" model_4bit = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", load_in_4bit=True) ``` `device_map`은 선택 사항입니다. 그러나 `device_map = 'auto'`로 설정하는 것이 사용 가능한 리소스를 효율적으로 디스패치하기 때문에 추론에 있어 권장됩니다. ### FP4 모델 실행 - 다중 GPU 설정 [[running-fp4-models-multi-gpu-setup]] 다중 GPU에서 혼합 4비트 모델을 가져오는 방법은 단일 GPU 설정과 동일합니다(동일한 명령어 사용): ```py model_name = "bigscience/bloom-2b5" model_4bit = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", load_in_4bit=True) ``` 하지만 `accelerate`를 사용하여 각 GPU에 할당할 GPU RAM을 제어할 수 있습니다. 
다음과 같이 `max_memory` 인수를 사용하세요: ```py max_memory_mapping = {0: "600MB", 1: "1GB"} model_name = "bigscience/bloom-3b" model_4bit = AutoModelForCausalLM.from_pretrained( model_name, device_map="auto", load_in_4bit=True, max_memory=max_memory_mapping ) ``` 이 예에서는 첫 번째 GPU가 600MB의 메모리를 사용하고 두 번째 GPU가 1GB를 사용합니다. ### 고급 사용법 [[advanced-usage]] 이 방법의 더 고급 사용법에 대해서는 [양자화](main_classes/quantization) 문서 페이지를 참조하세요. ## Int8 혼합 정밀도 행렬 분해를 위한 `bitsandbytes` 통합 [[bitsandbytes-integration-for-int8-mixedprecision-matrix-decomposition]] <Tip> 이 기능은 다중 GPU 설정에서도 사용할 수 있습니다. </Tip> [`LLM.int8() : 8-bit Matrix Multiplication for Transformers at Scale`](https://arxiv.org/abs/2208.07339) 논문에서 우리는 몇 줄의 코드로 Hub의 모든 모델에 대한 Hugging Face 통합을 지원합니다. 이 방법은 `float16` 및 `bfloat16` 가중치에 대해 `nn.Linear` 크기를 2배로 줄이고, `float32` 가중치에 대해 4배로 줄입니다. 이는 절반 정밀도에서 이상치를 처리함으로써 품질에 거의 영향을 미치지 않습니다. ![HFxbitsandbytes.png](https://cdn-uploads.huggingface.co/production/uploads/1659861207959-62441d1d9fdefb55a0b7d12c.png) Int8 혼합 정밀도 행렬 분해는 행렬 곱셈을 두 개의 스트림으로 분리합니다: (1) fp16로 곱해지는 체계적인 특이값 이상치 스트림 행렬(0.01%) 및 (2) int8 행렬 곱셈의 일반적인 스트림(99.9%). 이 방법을 사용하면 매우 큰 모델에 대해 예측 저하 없이 int8 추론이 가능합니다. 이 방법에 대한 자세한 내용은 [논문](https://arxiv.org/abs/2208.07339)이나 [통합에 관한 블로그 글](https://huggingface.co/blog/hf-bitsandbytes-integration)에서 확인할 수 있습니다. ![MixedInt8.gif](https://cdn-uploads.huggingface.co/production/uploads/1660567469965-62441d1d9fdefb55a0b7d12c.gif) 커널은 GPU 전용으로 컴파일되어 있기 때문에 혼합 8비트 모델을 실행하려면 GPU가 필요합니다. 이 기능을 사용하기 전에 모델의 1/4(또는 모델 가중치가 절반 정밀도인 경우 절반)을 저장할 충분한 GPU 메모리가 있는지 확인하세요. 이 모듈을 사용하는 데 도움이 되는 몇 가지 참고 사항이 아래에 나와 있습니다. 또는 [Google colab](#colab-demos)에서 데모를 따라할 수도 있습니다. ### 요구 사항 [[requirements-for-int8-mixedprecision-matrix-decomposition]] - `bitsandbytes<0.37.0`을 사용하는 경우, 8비트 텐서 코어(Turing, Ampere 또는 이후 아키텍처 - 예: T4, RTX20s RTX30s, A40-A100)를 지원하는 NVIDIA GPU에서 실행하는지 확인하세요. `bitsandbytes>=0.37.0`을 사용하는 경우, 모든 GPU가 지원됩니다. - 올바른 버전의 `bitsandbytes`를 다음 명령으로 설치하세요: `pip install bitsandbytes>=0.31.5` - `accelerate`를 설치하세요 `pip install accelerate>=0.12.0` ### 혼합 Int8 모델 실행 - 단일 GPU 설정 [[running-mixedint8-models-single-gpu-setup]] 필요한 라이브러리를 설치한 후 혼합 8비트 모델을 가져오는 방법은 다음과 같습니다: ```py from transformers import AutoModelForCausalLM model_name = "bigscience/bloom-2b5" model_8bit = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", load_in_8bit=True) ``` 텍스트 생성의 경우: * `pipeline()` 함수 대신 모델의 `generate()` 메소드를 사용하는 것을 권장합니다. `pipeline()` 함수로는 추론이 가능하지만, 혼합 8비트 모델에 최적화되지 않았기 때문에 `generate()` 메소드를 사용하는 것보다 느릴 수 있습니다. 또한, nucleus 샘플링과 같은 일부 샘플링 전략은 혼합 8비트 모델에 대해 `pipeline()` 함수에서 지원되지 않습니다. * 입력을 모델과 동일한 GPU에 배치하는 것이 좋습니다. 다음은 간단한 예입니다: ```py from transformers import AutoModelForCausalLM, AutoTokenizer model_name = "bigscience/bloom-2b5" tokenizer = AutoTokenizer.from_pretrained(model_name) model_8bit = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", load_in_8bit=True) prompt = "Hello, my llama is cute" inputs = tokenizer(prompt, return_tensors="pt").to("cuda") generated_ids = model.generate(**inputs) outputs = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) ``` ### 혼합 Int8 모델 실행 - 다중 GPU 설정 [[running-mixedint8-models-multi-gpu-setup]] 다중 GPU에서 혼합 8비트 모델을 로드하는 방법은 단일 GPU 설정과 동일합니다(동일한 명령어 사용): ```py model_name = "bigscience/bloom-2b5" model_8bit = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", load_in_8bit=True) ``` 하지만 `accelerate`를 사용하여 각 GPU에 할당할 GPU RAM을 제어할 수 있습니다. 
다음과 같이 `max_memory` 인수를 사용하세요: ```py max_memory_mapping = {0: "1GB", 1: "2GB"} model_name = "bigscience/bloom-3b" model_8bit = AutoModelForCausalLM.from_pretrained( model_name, device_map="auto", load_in_8bit=True, max_memory=max_memory_mapping ) ``` 이 예시에서는 첫 번째 GPU가 1GB의 메모리를 사용하고 두 번째 GPU가 2GB를 사용합니다. ### Colab 데모 [[colab-demos]] 이 방법을 사용하면 이전에 Google Colab에서 추론할 수 없었던 모델에 대해 추론할 수 있습니다. Google Colab에서 8비트 양자화를 사용하여 T5-11b(42GB in fp32)를 실행하는 데모를 확인하세요: [![Open In Colab: T5-11b demo](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1YORPWx4okIHXnjW7MSAidXN29mPVNT7F?usp=sharing) 또는 BLOOM-3B에 대한 데모를 확인하세요: [![Open In Colab: BLOOM-3b demo](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1qOjXfQIAULfKvZqwCen8-MoWKGdSatZ4?usp=sharing)
transformers/docs/source/ko/perf_infer_gpu_one.md/0
{ "file_path": "transformers/docs/source/ko/perf_infer_gpu_one.md", "repo_id": "transformers", "token_count": 6492 }
285
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 🤗 Transformers로 할 수 있는 것[[what__transformers_can_do]] 🤗 Transformers는 자연어처리(NLP), 컴퓨터 비전, 오디오 및 음성 처리 작업에 대한 사전훈련된 최첨단 모델 라이브러리입니다. 이 라이브러리는 트랜스포머 모델뿐만 아니라 컴퓨터 비전 작업을 위한 현대적인 합성곱 신경망과 같은 트랜스포머가 아닌 모델도 포함하고 있습니다. 스마트폰, 앱, 텔레비전과 같은 오늘날 가장 인기 있는 소비자 제품을 살펴보면, 딥러닝 기술이 그 뒤에 사용되고 있을 확률이 높습니다. 스마트폰으로 촬영한 사진에서 배경 객체를 제거하고 싶다면 어떻게 할까요? 이는 파놉틱 세그멘테이션 작업의 예입니다(아직 이게 무엇인지 모른다면, 다음 섹션에서 설명하겠습니다!). 이 페이지는 다양한 음성 및 오디오, 컴퓨터 비전, NLP 작업을 🤗 Transformers 라이브러리를 활용하여 다루는 간단한 예제를 3줄의 코드로 제공합니다. ## 오디오[[audio]] 음성 및 오디오 처리 작업은 다른 모달리티와 약간 다릅니다. 이는 주로 오디오가 연속적인 신호로 입력되기 때문입니다. 텍스트와 달리 원본 오디오 파형(waveform)은 문장이 단어로 나눠지는 것처럼 깔끔하게 이산적인 묶음으로 나눌 수 없습니다. 이를 극복하기 위해 원본 오디오 신호는 일정한 간격으로 샘플링됩니다. 해당 간격 내에서 더 많은 샘플을 취할 경우 샘플링률이 높아지며, 오디오는 원본 오디오 소스에 더 가까워집니다. 과거의 접근 방식은 오디오에서 유용한 특징을 추출하기 위해 오디오를 전처리하는 것이었습니다. 하지만 현재는 원본 오디오 파형을 특성 인코더에 직접 넣어서 오디오 표현(representation)을 추출하는 것이 더 일반적입니다. 이렇게 하면 전처리 단계가 단순해지고 모델이 가장 중요한 특징을 학습할 수 있습니다. ### 오디오 분류[[audio_classification]] 오디오 분류는 오디오 데이터에 미리 정의된 클래스 집합의 레이블을 지정하는 작업입니다. 이는 많은 구체적인 응용 프로그램을 포함한 넓은 범주입니다. 일부 예시는 다음과 같습니다: * 음향 장면 분류: 오디오에 장면 레이블("사무실", "해변", "경기장")을 지정합니다. * 음향 이벤트 감지: 오디오에 소리 이벤트 레이블("차 경적", "고래 울음소리", "유리 파손")을 지정합니다. * 태깅: 여러 가지 소리(새 지저귐, 회의에서의 화자 식별)가 포함된 오디오에 레이블을 지정합니다. * 음악 분류: 음악에 장르 레이블("메탈", "힙합", "컨트리")을 지정합니다. ```py >>> from transformers import pipeline >>> classifier = pipeline(task="audio-classification", model="superb/hubert-base-superb-er") >>> preds = classifier("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac") >>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds] >>> preds [{'score': 0.4532, 'label': 'hap'}, {'score': 0.3622, 'label': 'sad'}, {'score': 0.0943, 'label': 'neu'}, {'score': 0.0903, 'label': 'ang'}] ``` ### 자동 음성 인식[[automatic_speech_recognition]] 자동 음성 인식(ASR)은 음성을 텍스트로 변환하는 작업입니다. 음성은 인간의 자연스러운 의사소통 형태이기 때문에 ASR은 가장 일반적인 오디오 작업 중 하나입니다. 오늘날 ASR 시스템은 스피커, 전화 및 자동차와 같은 "스마트" 기술 제품에 내장되어 있습니다. 우리는 가상 비서에게 음악 재생, 알림 설정 및 날씨 정보를 요청할 수 있습니다. 하지만 트랜스포머 아키텍처가 해결하는 데 도움을 준 핵심 도전 과제 중 하나는 양이 데이터 양이 적은 언어(low-resource language)에 대한 것입니다. 대량의 음성 데이터로 사전 훈련한 후 데이터 양이 적은 언어에서 레이블이 지정된 음성 데이터 1시간만으로 모델을 미세 조정하면 이전의 100배 많은 레이블이 지정된 데이터로 훈련된 ASR 시스템보다 훨씬 더 높은 품질의 결과를 얻을 수 있습니다. ```py >>> from transformers import pipeline >>> transcriber = pipeline(task="automatic-speech-recognition", model="openai/whisper-small") >>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac") {'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its creed.'} ``` ## 컴퓨터 비전[[computer_vision]] 컴퓨터 비전 작업 중 가장 초기의 성공적인 작업 중 하나는 [합성곱 신경망(CNN)](glossary#convolution)을 사용하여 우편번호 숫자 이미지를 인식하는 것이었습니다. 이미지는 픽셀로 구성되어 있으며 각 픽셀은 숫자 값으로 표현됩니다. 이로써 이미지를 픽셀 값의 행렬로 나타내는 것이 쉬워집니다. 특정한 픽셀 값의 조합은 이미지의 색상을 의미합니다. 
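예를 들어, 이미지가 어떻게 픽셀 값의 행렬로 표현되는지는 아래와 같이 간단히 확인해볼 수 있습니다. (아래 코드는 이해를 돕기 위한 간단한 예시로, 파일 경로는 가정한 값이며 `Pillow`와 `NumPy`가 설치되어 있다고 가정합니다.)

```py
>>> import numpy as np
>>> from PIL import Image

>>> # 파일 경로는 설명을 위한 가정입니다
>>> image = Image.open("my_image.jpg")
>>> pixels = np.array(image)  # (높이, 너비, 색상 채널) 형태의 픽셀 값 행렬
```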
컴퓨터 비전 작업은 일반적으로 다음 두 가지 방법으로 접근 가능합니다: 1. 합성곱을 사용하여 이미지의 낮은 수준 특징에서 높은 수준의 추상적인 요소까지 계층적으로 학습합니다. 2. 이미지를 패치로 나누고 트랜스포머를 사용하여 점진적으로 각 이미지 패치가 서로 어떠한 방식으로 연관되어 이미지를 형성하는지 학습합니다. `CNN`에서 선호하는 상향식 접근법과는 달리, 이 방식은 흐릿한 이미지로 초안을 그리고 점진적으로 선명한 이미지로 만들어가는 것과 유사합니다. ### 이미지 분류[[image_classification]] 이미지 분류는 한 개의 전체 이미지에 미리 정의된 클래스 집합의 레이블을 지정하는 작업입니다. 대부분의 분류 작업과 마찬가지로, 이미지 분류에는 다양한 실용적인 용도가 있으며, 일부 예시는 다음과 같습니다: * 의료: 질병을 감지하거나 환자 건강을 모니터링하기 위해 의료 이미지에 레이블을 지정합니다. * 환경: 위성 이미지를 분류하여 산림 벌채를 감시하고 야생 지역 관리를 위한 정보를 제공하거나 산불을 감지합니다. * 농업: 작물 이미지를 분류하여 식물 건강을 확인하거나 위성 이미지를 분류하여 토지 이용 관찰에 사용합니다. * 생태학: 동물이나 식물 종 이미지를 분류하여 야생 동물 개체군을 조사하거나 멸종 위기에 처한 종을 추적합니다. ```py >>> from transformers import pipeline >>> classifier = pipeline(task="image-classification") >>> preds = classifier( ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" ... ) >>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds] >>> print(*preds, sep="\n") {'score': 0.4335, 'label': 'lynx, catamount'} {'score': 0.0348, 'label': 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor'} {'score': 0.0324, 'label': 'snow leopard, ounce, Panthera uncia'} {'score': 0.0239, 'label': 'Egyptian cat'} {'score': 0.0229, 'label': 'tiger cat'} ``` ### 객체 탐지[[object_detection]] 이미지 분류와 달리 객체 탐지는 이미지 내에서 여러 객체를 식별하고 바운딩 박스로 정의된 객체의 위치를 파악합니다. 객체 탐지의 몇 가지 응용 예시는 다음과 같습니다: * 자율 주행 차량: 다른 차량, 보행자 및 신호등과 같은 일상적인 교통 객체를 감지합니다. * 원격 감지: 재난 모니터링, 도시 계획 및 기상 예측 등을 수행합니다. * 결함 탐지: 건물의 균열이나 구조적 손상, 제조 결함 등을 탐지합니다. ```py >>> from transformers import pipeline >>> detector = pipeline(task="object-detection") >>> preds = detector( ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" ... ) >>> preds = [{"score": round(pred["score"], 4), "label": pred["label"], "box": pred["box"]} for pred in preds] >>> preds [{'score': 0.9865, 'label': 'cat', 'box': {'xmin': 178, 'ymin': 154, 'xmax': 882, 'ymax': 598}}] ``` ### 이미지 분할[[image_segmentation]] 이미지 분할은 픽셀 차원의 작업으로, 이미지 내의 모든 픽셀을 클래스에 할당합니다. 이는 객체 탐지와 다릅니다. 객체 탐지는 바운딩 박스를 사용하여 이미지 내의 객체를 레이블링하고 예측하는 반면, 분할은 더 세분화된 작업입니다. 분할은 픽셀 수준에서 객체를 감지할 수 있습니다. 이미지 분할에는 여러 유형이 있습니다: * 인스턴스 분할: 개체의 클래스를 레이블링하는 것 외에도, 개체의 각 구분된 인스턴스에도 레이블을 지정합니다 ("개-1", "개-2" 등). * 파놉틱 분할: 의미적 분할과 인스턴스 분할의 조합입니다. 각 픽셀을 의미적 클래스로 레이블링하는 **동시에** 개체의 각각 구분된 인스턴스로도 레이블을 지정합니다. 분할 작업은 자율 주행 차량에서 유용하며, 주변 환경의 픽셀 수준 지도를 생성하여 보행자와 다른 차량 주변에서 안전하게 탐색할 수 있습니다. 또한 의료 영상에서도 유용합니다. 분할 작업이 픽셀 수준에서 객체를 감지할 수 있기 때문에 비정상적인 세포나 장기의 특징을 식별하는 데 도움이 될 수 있습니다. 이미지 분할은 의류 가상 시착이나 카메라를 통해 실제 세계에 가상 개체를 덧씌워 증강 현실 경험을 만드는 등 전자 상거래 분야에서도 사용될 수 있습니다. ```py >>> from transformers import pipeline >>> segmenter = pipeline(task="image-segmentation") >>> preds = segmenter( ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" ... ) >>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds] >>> print(*preds, sep="\n") {'score': 0.9879, 'label': 'LABEL_184'} {'score': 0.9973, 'label': 'snow'} {'score': 0.9972, 'label': 'cat'} ``` ### 깊이 추정[[depth_estimation]] 깊이 추정은 카메라로부터 이미지 내부의 각 픽셀의 거리를 예측합니다. 이 컴퓨터 비전 작업은 특히 장면 이해와 재구성에 중요합니다. 예를 들어, 자율 주행 차량은 보행자, 교통 표지판 및 다른 차량과 같은 객체와의 거리를 이해하여 장애물과 충돌을 피해야 합니다. 깊이 정보는 또한 2D 이미지에서 3D 표현을 구성하는 데 도움이 되며 생물학적 구조나 건물의 고품질 3D 표현을 생성하는 데 사용될 수 있습니다. 깊이 추정에는 두 가지 접근 방식이 있습니다: * 스테레오: 약간 다른 각도에서 촬영된 동일한 이미지 두 장을 비교하여 깊이를 추정합니다. * 단안: 단일 이미지에서 깊이를 추정합니다. 
```py >>> from transformers import pipeline >>> depth_estimator = pipeline(task="depth-estimation") >>> preds = depth_estimator( ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" ... ) ``` ## 자연어처리[[natural_language_processing]] 텍스트는 인간이 의사 소통하는 자연스러운 방식 중 하나이기 때문에 자연어처리 역시 가장 일반적인 작업 유형 중 하나입니다. 모델이 인식하는 형식으로 텍스트를 변환하려면 토큰화해야 합니다. 이는 텍스트 시퀀스를 개별 단어 또는 하위 단어(토큰)로 분할한 다음 이러한 토큰을 숫자로 변환하는 것을 의미합니다. 결과적으로 텍스트 시퀀스를 숫자 시퀀스로 표현할 수 있으며, 숫자 시퀀스를 다양한 자연어처리 작업을 해결하기 위한 모델에 입력할 수 있습니다! ### 텍스트 분류[[text_classification]] 다른 모달리티에서의 분류 작업과 마찬가지로 텍스트 분류는 미리 정의된 클래스 집합에서 텍스트 시퀀스(문장 수준, 단락 또는 문서 등)에 레이블을 지정합니다. 텍스트 분류에는 다양한 실용적인 응용 사례가 있으며, 일부 예시는 다음과 같습니다: * 감성 분석: 텍스트를 `긍정` 또는 `부정`과 같은 어떤 극성에 따라 레이블링하여 정치, 금융, 마케팅과 같은 분야에서 의사 결정에 정보를 제공하고 지원할 수 있습니다. * 콘텐츠 분류: 텍스트를 주제에 따라 레이블링(날씨, 스포츠, 금융 등)하여 뉴스 및 소셜 미디어 피드에서 정보를 구성하고 필터링하는 데 도움이 될 수 있습니다. ```py >>> from transformers import pipeline >>> classifier = pipeline(task="sentiment-analysis") >>> preds = classifier("Hugging Face is the best thing since sliced bread!") >>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds] >>> preds [{'score': 0.9991, 'label': 'POSITIVE'}] ``` ### 토큰 분류[[token_classification]] 모든 자연어처리 작업에서는 텍스트가 개별 단어나 하위 단어로 분리되어 전처리됩니다. 분리된 단어를 [토큰](/glossary#token)이라고 합니다. 토큰 분류는 각 토큰에 미리 정의된 클래스 집합의 레이블을 할당합니다. 토큰 분류의 두 가지 일반적인 유형은 다음과 같습니다: * 개체명 인식 (NER): 토큰을 조직, 인물, 위치 또는 날짜와 같은 개체 범주에 따라 레이블링합니다. NER은 특히 유전체학적인 환경에서 유전자, 단백질 및 약물 이름에 레이블을 지정하는 데 널리 사용됩니다. * 품사 태깅 (POS): 명사, 동사, 형용사와 같은 품사에 따라 토큰에 레이블을 할당합니다. POS는 번역 시스템이 동일한 단어가 문법적으로 어떻게 다른지 이해하는 데 도움이 됩니다 (명사로 사용되는 "bank(은행)"과 동사로 사용되는 "bank(예금을 예치하다)"과 같은 경우). ```py >>> from transformers import pipeline >>> classifier = pipeline(task="ner") >>> preds = classifier("Hugging Face is a French company based in New York City.") >>> preds = [ ... { ... "entity": pred["entity"], ... "score": round(pred["score"], 4), ... "index": pred["index"], ... "word": pred["word"], ... "start": pred["start"], ... "end": pred["end"], ... } ... for pred in preds ... ] >>> print(*preds, sep="\n") {'entity': 'I-ORG', 'score': 0.9968, 'index': 1, 'word': 'Hu', 'start': 0, 'end': 2} {'entity': 'I-ORG', 'score': 0.9293, 'index': 2, 'word': '##gging', 'start': 2, 'end': 7} {'entity': 'I-ORG', 'score': 0.9763, 'index': 3, 'word': 'Face', 'start': 8, 'end': 12} {'entity': 'I-MISC', 'score': 0.9983, 'index': 6, 'word': 'French', 'start': 18, 'end': 24} {'entity': 'I-LOC', 'score': 0.999, 'index': 10, 'word': 'New', 'start': 42, 'end': 45} {'entity': 'I-LOC', 'score': 0.9987, 'index': 11, 'word': 'York', 'start': 46, 'end': 50} {'entity': 'I-LOC', 'score': 0.9992, 'index': 12, 'word': 'City', 'start': 51, 'end': 55} ``` ### 질의응답[[question_answering]] 질의응답은 또 하나의 토큰 차원의 작업으로, 문맥이 있을 때(개방형 도메인)와 문맥이 없을 때(폐쇄형 도메인) 질문에 대한 답변을 반환합니다. 이 작업은 가상 비서에게 식당이 영업 중인지와 같은 질문을 할 때마다 발생할 수 있습니다. 고객 지원 또는 기술 지원을 제공하거나 검색 엔진이 요청한 정보를 검색하는 데 도움을 줄 수 있습니다. 질문 답변에는 일반적으로 두 가지 유형이 있습니다: * 추출형: 질문과 문맥이 주어졌을 때, 모델이 주어진 문맥의 일부에서 가져온 텍스트의 범위를 답변으로 합니다. * 생성형: 질문과 문맥이 주어졌을 때, 주어진 문맥을 통해 답변을 생성합니다. 이 접근 방식은 [`QuestionAnsweringPipeline`] 대신 [`Text2TextGenerationPipeline`]을 통해 처리됩니다. ```py >>> from transformers import pipeline >>> question_answerer = pipeline(task="question-answering") >>> preds = question_answerer( ... question="What is the name of the repository?", ... context="The name of the repository is huggingface/transformers", ... ) >>> print( ... 
f"score: {round(preds['score'], 4)}, start: {preds['start']}, end: {preds['end']}, answer: {preds['answer']}" ... ) score: 0.9327, start: 30, end: 54, answer: huggingface/transformers ``` ### 요약[[summarization]] 요약은 원본 문서의 의미를 최대한 보존하면서 긴 문서를 짧은 문서로 만드는 작업입니다. 요약은 `sequence-to-sequence` 작업입니다. 입력보다 짧은 텍스트 시퀀스를 출력합니다. 요약 작업은 독자가 장문 문서들의 주요 포인트를 빠르게 이해하는 데 도움을 줄 수 있습니다. 입법안, 법률 및 금융 문서, 특허 및 과학 논문은 요약 작업이 독자의 시간을 절약하고 독서 보조 도구로 사용될 수 있는 몇 가지 예시입니다. 질문 답변과 마찬가지로 요약에는 두 가지 유형이 있습니다: * 추출형: 원본 텍스트에서 가장 중요한 문장을 식별하고 추출합니다. * 생성형: 원본 텍스트에서 목표 요약을 생성합니다. 입력 문서에 없는 새로운 단어를 포함할 수도 있습니다. [`SummarizationPipeline`]은 생성형 접근 방식을 사용합니다. ```py >>> from transformers import pipeline >>> summarizer = pipeline(task="summarization") >>> summarizer( ... "In this work, we presented the Transformer, the first sequence transduction model based entirely on attention, replacing the recurrent layers most commonly used in encoder-decoder architectures with multi-headed self-attention. For translation tasks, the Transformer can be trained significantly faster than architectures based on recurrent or convolutional layers. On both WMT 2014 English-to-German and WMT 2014 English-to-French translation tasks, we achieve a new state of the art. In the former task our best model outperforms even all previously reported ensembles." ... ) [{'summary_text': ' The Transformer is the first sequence transduction model based entirely on attention . It replaces the recurrent layers most commonly used in encoder-decoder architectures with multi-headed self-attention . For translation tasks, the Transformer can be trained significantly faster than architectures based on recurrent or convolutional layers .'}] ``` ### 번역[[translation]] 번역은 한 언어로 된 텍스트 시퀀스를 다른 언어로 변환하는 작업입니다. 이는 서로 다른 배경을 가진 사람들이 서로 소통하는 데 도움을 주는 중요한 역할을 합니다. 더 넓은 대중에게 콘텐츠를 번역하여 전달하거나, 새로운 언어를 배우는 데 도움이 되는 학습 도구가 될 수도 있습니다. 요약과 마찬가지로, 번역은 `sequence-to-sequence` 작업입니다. 즉, 모델은 입력 시퀀스를 받아서 출력이 되는 목표 시퀀스를 반환합니다. 초기의 번역 모델은 대부분 단일 언어로 이루어져 있었지만, 최근에는 많은 언어 쌍 간에 번역을 수행할 수 있는 다중 언어 모델에 대한 관심이 높아지고 있습니다. ```py >>> from transformers import pipeline >>> text = "translate English to French: Hugging Face is a community-based open-source platform for machine learning." >>> translator = pipeline(task="translation", model="google-t5/t5-small") >>> translator(text) [{'translation_text': "Hugging Face est une tribune communautaire de l'apprentissage des machines."}] ``` ### 언어 모델링[[language_modeling]] 언어 모델링은 텍스트 시퀀스에서 단어를 예측하는 작업입니다. 사전 훈련된 언어 모델은 많은 다른 하위 작업에 따라 미세 조정될 수 있기 때문에 매우 인기 있는 자연어처리 작업이 되었습니다. 최근에는 제로 샷(zero-shot) 또는 퓨 샷(few-shot) 학습이 가능한 대규모 언어 모델(Large Language Models, LLM)에 대한 많은 관심이 발생하고 있습니다. 이는 모델이 명시적으로 훈련되지 않은 작업도 해결할 수 있다는 것을 의미합니다! 언어 모델은 유창하고 설득력 있는 텍스트를 생성하는 데 사용될 수 있지만, 텍스트가 항상 정확하지는 않을 수 있으므로 주의가 필요합니다. 언어 모델링에는 두 가지 유형이 있습니다: * 인과적 언어 모델링: 이 모델의 목적은 시퀀스에서 다음 토큰을 예측하는 것이며, 미래 토큰이 마스킹 됩니다. ```py >>> from transformers import pipeline >>> prompt = "Hugging Face is a community-based open-source platform for machine learning." >>> generator = pipeline(task="text-generation") >>> generator(prompt) # doctest: +SKIP ``` * 마스킹된 언어 모델링: 이 모델의 목적은 시퀀스 내의 마스킹된 토큰을 예측하는 것이며, 시퀀스 내의 모든 토큰에 대한 접근이 제공됩니다. ```py >>> text = "Hugging Face is a community-based open-source <mask> for machine learning." >>> fill_mask = pipeline(task="fill-mask") >>> preds = fill_mask(text, top_k=1) >>> preds = [ ... { ... "score": round(pred["score"], 4), ... "token": pred["token"], ... "token_str": pred["token_str"], ... "sequence": pred["sequence"], ... } ... for pred in preds ... 
] >>> preds [{'score': 0.2236, 'token': 1761, 'token_str': ' platform', 'sequence': 'Hugging Face is a community-based open-source platform for machine learning.'}] ``` 이 페이지를 통해 각 모달리티의 다양한 작업 유형과 각 작업의 실용적 중요성에 대해 추가적인 배경 정보를 얻으셨기를 바랍니다. 다음 [섹션](tasks_explained)에서는 🤗 Transformer가 이러한 작업을 해결하는 **방법**에 대해 알아보실 수 있습니다.
transformers/docs/source/ko/task_summary.md/0
{ "file_path": "transformers/docs/source/ko/task_summary.md", "repo_id": "transformers", "token_count": 15732 }
286
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 번역[[translation]] [[open-in-colab]] <Youtube id="1JvfrvZgi6c"/> 번역은 한 언어로 된 시퀀스를 다른 언어로 변환합니다. 번역이나 요약은 입력을 받아 일련의 출력을 반환하는 강력한 프레임워크인 시퀀스-투-시퀀스 문제로 구성할 수 있는 대표적인 태스크입니다. 번역 시스템은 일반적으로 다른 언어로 된 텍스트 간의 번역에 사용되지만, 음성 간의 통역이나 텍스트-음성 또는 음성-텍스트와 같은 조합에도 사용될 수 있습니다. 이 가이드에서 학습할 내용은: 1. 영어 텍스트를 프랑스어로 번역하기 위해 [T5](https://huggingface.co/google-t5/t5-small) 모델을 OPUS Books 데이터세트의 영어-프랑스어 하위 집합으로 파인튜닝하는 방법과 2. 파인튜닝된 모델을 추론에 사용하는 방법입니다. <Tip> 이 태스크 가이드는 아래 모델 아키텍처에도 응용할 수 있습니다. <!--This tip is automatically generated by `make fix-copies`, do not fill manually!--> [BART](../model_doc/bart), [BigBird-Pegasus](../model_doc/bigbird_pegasus), [Blenderbot](../model_doc/blenderbot), [BlenderbotSmall](../model_doc/blenderbot-small), [Encoder decoder](../model_doc/encoder-decoder), [FairSeq Machine-Translation](../model_doc/fsmt), [GPTSAN-japanese](../model_doc/gptsan-japanese), [LED](../model_doc/led), [LongT5](../model_doc/longt5), [M2M100](../model_doc/m2m_100), [Marian](../model_doc/marian), [mBART](../model_doc/mbart), [MT5](../model_doc/mt5), [MVP](../model_doc/mvp), [NLLB](../model_doc/nllb), [NLLB-MOE](../model_doc/nllb-moe), [Pegasus](../model_doc/pegasus), [PEGASUS-X](../model_doc/pegasus_x), [PLBart](../model_doc/plbart), [ProphetNet](../model_doc/prophetnet), [SwitchTransformers](../model_doc/switch_transformers), [T5](../model_doc/t5), [XLM-ProphetNet](../model_doc/xlm-prophetnet) <!--End of the generated tip--> </Tip> 시작하기 전에 필요한 라이브러리가 모두 설치되어 있는지 확인하세요: ```bash pip install transformers datasets evaluate sacrebleu ``` 모델을 업로드하고 커뮤니티와 공유할 수 있도록 Hugging Face 계정에 로그인하는 것이 좋습니다. 새로운 창이 표시되면 토큰을 입력하여 로그인하세요. ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` ## OPUS Books 데이터세트 가져오기[[load-opus-books-dataset]] 먼저 🤗 Datasets 라이브러리에서 [OPUS Books](https://huggingface.co/datasets/opus_books) 데이터세트의 영어-프랑스어 하위 집합을 가져오세요. ```py >>> from datasets import load_dataset >>> books = load_dataset("opus_books", "en-fr") ``` 데이터세트를 [`~datasets.Dataset.train_test_split`] 메서드를 사용하여 훈련 및 테스트 데이터로 분할하세요. ```py >>> books = books["train"].train_test_split(test_size=0.2) ``` 훈련 데이터에서 예시를 살펴볼까요? ```py >>> books["train"][0] {'id': '90560', 'translation': {'en': 'But this lofty plateau measured only a few fathoms, and soon we reentered Our Element.', 'fr': 'Mais ce plateau élevé ne mesurait que quelques toises, et bientôt nous fûmes rentrés dans notre élément.'}} ``` 반환된 딕셔너리의 `translation` 키가 텍스트의 영어, 프랑스어 버전을 포함하고 있는 것을 볼 수 있습니다. ## 전처리[[preprocess]] <Youtube id="XAR8jnZZuUs"/> 다음 단계로 영어-프랑스어 쌍을 처리하기 위해 T5 토크나이저를 가져오세요. ```py >>> from transformers import AutoTokenizer >>> checkpoint = "google-t5/t5-small" >>> tokenizer = AutoTokenizer.from_pretrained(checkpoint) ``` 만들 전처리 함수는 아래 요구사항을 충족해야 합니다: 1. T5가 번역 태스크임을 인지할 수 있도록 입력 앞에 프롬프트를 추가하세요. 
여러 NLP 태스크를 할 수 있는 모델 중 일부는 이렇게 태스크 프롬프트를 미리 줘야합니다. 2. 원어(영어)과 번역어(프랑스어)를 별도로 토큰화하세요. 영어 어휘로 사전 학습된 토크나이저로 프랑스어 텍스트를 토큰화할 수는 없기 때문입니다. 3. `max_length` 매개변수로 설정한 최대 길이보다 길지 않도록 시퀀스를 truncate하세요. ```py >>> source_lang = "en" >>> target_lang = "fr" >>> prefix = "translate English to French: " >>> def preprocess_function(examples): ... inputs = [prefix + example[source_lang] for example in examples["translation"]] ... targets = [example[target_lang] for example in examples["translation"]] ... model_inputs = tokenizer(inputs, text_target=targets, max_length=128, truncation=True) ... return model_inputs ``` 전체 데이터세트에 전처리 함수를 적용하려면 🤗 Datasets의 [`~datasets.Dataset.map`] 메서드를 사용하세요. `map` 함수의 속도를 높이려면 `batched=True`를 설정하여 데이터세트의 여러 요소를 한 번에 처리하는 방법이 있습니다. ```py >>> tokenized_books = books.map(preprocess_function, batched=True) ``` 이제 [`DataCollatorForSeq2Seq`]를 사용하여 예제 배치를 생성합니다. 데이터세트의 최대 길이로 전부를 padding하는 대신, 데이터 정렬 중 각 배치의 최대 길이로 문장을 *동적으로 padding*하는 것이 더 효율적입니다. <frameworkcontent> <pt> ```py >>> from transformers import DataCollatorForSeq2Seq >>> data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=checkpoint) ``` </pt> <tf> ```py >>> from transformers import DataCollatorForSeq2Seq >>> data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=checkpoint, return_tensors="tf") ``` </tf> </frameworkcontent> ## 평가[[evalulate]] 훈련 중에 메트릭을 포함하면 모델의 성능을 평가하는 데 도움이 됩니다. 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index) 라이브러리로 평가 방법(evaluation method)을 빠르게 가져올 수 있습니다. 현재 태스크에 적합한 SacreBLEU 메트릭을 가져오세요. (메트릭을 가져오고 계산하는 방법에 대해 자세히 알아보려면 🤗 Evaluate [둘러보기](https://huggingface.co/docs/evaluate/a_quick_tour)를 참조하세요): ```py >>> import evaluate >>> metric = evaluate.load("sacrebleu") ``` 그런 다음 [`~evaluate.EvaluationModule.compute`]에 예측값과 레이블을 전달하여 SacreBLEU 점수를 계산하는 함수를 생성하세요: ```py >>> import numpy as np >>> def postprocess_text(preds, labels): ... preds = [pred.strip() for pred in preds] ... labels = [[label.strip()] for label in labels] ... return preds, labels >>> def compute_metrics(eval_preds): ... preds, labels = eval_preds ... if isinstance(preds, tuple): ... preds = preds[0] ... decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True) ... labels = np.where(labels != -100, labels, tokenizer.pad_token_id) ... decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) ... decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels) ... result = metric.compute(predictions=decoded_preds, references=decoded_labels) ... result = {"bleu": result["score"]} ... prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds] ... result["gen_len"] = np.mean(prediction_lens) ... result = {k: round(v, 4) for k, v in result.items()} ... return result ``` 이제 `compute_metrics` 함수는 준비되었고, 훈련 과정을 설정할 때 다시 살펴볼 예정입니다. ## 훈련[[train]] <frameworkcontent> <pt> <Tip> [`Trainer`]로 모델을 파인튜닝하는 방법에 익숙하지 않다면 [여기](../training#train-with-pytorch-trainer)에서 기본 튜토리얼을 살펴보시기 바랍니다! </Tip> 모델을 훈련시킬 준비가 되었군요! [`AutoModelForSeq2SeqLM`]으로 T5를 로드하세요: ```py >>> from transformers import AutoModelForSeq2SeqLM, Seq2SeqTrainingArguments, Seq2SeqTrainer >>> model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint) ``` 이제 세 단계만 거치면 끝입니다: 1. [`Seq2SeqTrainingArguments`]에서 훈련 하이퍼파라미터를 정의하세요. 유일한 필수 매개변수는 모델을 저장할 위치인 `output_dir`입니다. 모델을 Hub에 푸시하기 위해 `push_to_hub=True`로 설정하세요. (모델을 업로드하려면 Hugging Face에 로그인해야 합니다.) [`Trainer`]는 에폭이 끝날때마다 SacreBLEU 메트릭을 평가하고 훈련 체크포인트를 저장합니다. 2. [`Seq2SeqTrainer`]에 훈련 인수를 전달하세요. 
모델, 데이터 세트, 토크나이저, data collator 및 `compute_metrics` 함수도 덩달아 전달해야 합니다. 3. [`~Trainer.train`]을 호출하여 모델을 파인튜닝하세요. ```py >>> training_args = Seq2SeqTrainingArguments( ... output_dir="my_awesome_opus_books_model", ... evaluation_strategy="epoch", ... learning_rate=2e-5, ... per_device_train_batch_size=16, ... per_device_eval_batch_size=16, ... weight_decay=0.01, ... save_total_limit=3, ... num_train_epochs=2, ... predict_with_generate=True, ... fp16=True, ... push_to_hub=True, ... ) >>> trainer = Seq2SeqTrainer( ... model=model, ... args=training_args, ... train_dataset=tokenized_books["train"], ... eval_dataset=tokenized_books["test"], ... tokenizer=tokenizer, ... data_collator=data_collator, ... compute_metrics=compute_metrics, ... ) >>> trainer.train() ``` 학습이 완료되면 [`~transformers.Trainer.push_to_hub`] 메서드로 모델을 Hub에 공유하세요. 이러면 누구나 모델을 사용할 수 있게 됩니다: ```py >>> trainer.push_to_hub() ``` </pt> <tf> <Tip> Keras로 모델을 파인튜닝하는 방법이 익숙하지 않다면, [여기](../training#train-a-tensorflow-model-with-keras)에서 기본 튜토리얼을 살펴보시기 바랍니다! </Tip> TensorFlow에서 모델을 파인튜닝하려면 우선 optimizer 함수, 학습률 스케줄 등의 훈련 하이퍼파라미터를 설정하세요: ```py >>> from transformers import AdamWeightDecay >>> optimizer = AdamWeightDecay(learning_rate=2e-5, weight_decay_rate=0.01) ``` 이제 [`TFAutoModelForSeq2SeqLM`]로 T5를 가져오세요: ```py >>> from transformers import TFAutoModelForSeq2SeqLM >>> model = TFAutoModelForSeq2SeqLM.from_pretrained(checkpoint) ``` [`~transformers.TFPreTrainedModel.prepare_tf_dataset`]로 데이터 세트를 `tf.data.Dataset` 형식으로 변환하세요: ```py >>> tf_train_set = model.prepare_tf_dataset( ... tokenized_books["train"], ... shuffle=True, ... batch_size=16, ... collate_fn=data_collator, ... ) >>> tf_test_set = model.prepare_tf_dataset( ... tokenized_books["test"], ... shuffle=False, ... batch_size=16, ... collate_fn=data_collator, ... ) ``` 훈련하기 위해 [`compile`](https://keras.io/api/models/model_training_apis/#compile-method) 메서드로 모델을 구성하세요: ```py >>> import tensorflow as tf >>> model.compile(optimizer=optimizer) ``` 훈련을 시작하기 전에 예측값으로부터 SacreBLEU 메트릭을 계산하는 방법과 모델을 Hub에 업로드하는 방법 두 가지를 미리 설정해둬야 합니다. 둘 다 [Keras callbacks](../main_classes/keras_callbacks)로 구현하세요. [`~transformers.KerasMetricCallback`]에 `compute_metrics` 함수를 전달하세요. ```py >>> from transformers.keras_callbacks import KerasMetricCallback >>> metric_callback = KerasMetricCallback(metric_fn=compute_metrics, eval_dataset=tf_validation_set) ``` 모델과 토크나이저를 업로드할 위치를 [`~transformers.PushToHubCallback`]에서 지정하세요: ```py >>> from transformers.keras_callbacks import PushToHubCallback >>> push_to_hub_callback = PushToHubCallback( ... output_dir="my_awesome_opus_books_model", ... tokenizer=tokenizer, ... ) ``` 이제 콜백들을 한데로 묶어주세요: ```py >>> callbacks = [metric_callback, push_to_hub_callback] ``` 드디어 모델을 훈련시킬 모든 준비를 마쳤군요! 이제 훈련 및 검증 데이터 세트에 [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) 메서드를 에폭 수와 만들어둔 콜백과 함께 호출하여 모델을 파인튜닝하세요: ```py >>> model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=3, callbacks=callbacks) ``` 학습이 완료되면 모델이 자동으로 Hub에 업로드되고, 누구나 사용할 수 있게 됩니다! </tf> </frameworkcontent> <Tip> 번역을 위해 모델을 파인튜닝하는 방법에 대한 보다 자세한 예제는 해당 [PyTorch 노트북](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/translation.ipynb) 또는 [TensorFlow 노트북](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/translation-tf.ipynb)을 참조하세요. </Tip> ## 추론[[inference]] 좋아요, 이제 모델을 파인튜닝했으니 추론에 사용할 수 있습니다! 다른 언어로 번역하고 싶은 텍스트를 써보세요. T5의 경우 원하는 태스크를 입력의 접두사로 추가해야 합니다. 
예를 들어 영어에서 프랑스어로 번역하는 경우, 아래와 같은 접두사가 추가됩니다: ```py >>> text = "translate English to French: Legumes share resources with nitrogen-fixing bacteria." ``` 파인튜닝된 모델로 추론하기에 제일 간단한 방법은 [`pipeline`]을 사용하는 것입니다. 해당 모델로 번역 `pipeline`을 만든 뒤, 텍스트를 전달하세요: ```py >>> from transformers import pipeline >>> translator = pipeline("translation", model="my_awesome_opus_books_model") >>> translator(text) [{'translation_text': 'Legumes partagent des ressources avec des bactéries azotantes.'}] ``` 원한다면 `pipeline`의 결과를 직접 복제할 수도 있습니다: <frameworkcontent> <pt> 텍스트를 토큰화하고 `input_ids`를 PyTorch 텐서로 반환하세요: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("my_awesome_opus_books_model") >>> inputs = tokenizer(text, return_tensors="pt").input_ids ``` [`~transformers.generation_utils.GenerationMixin.generate`] 메서드로 번역을 생성하세요. 다양한 텍스트 생성 전략 및 생성을 제어하기 위한 매개변수에 대한 자세한 내용은 [Text Generation](../main_classes/text_generation) API를 살펴보시기 바랍니다. ```py >>> from transformers import AutoModelForSeq2SeqLM >>> model = AutoModelForSeq2SeqLM.from_pretrained("my_awesome_opus_books_model") >>> outputs = model.generate(inputs, max_new_tokens=40, do_sample=True, top_k=30, top_p=0.95) ``` 생성된 토큰 ID들을 다시 텍스트로 디코딩하세요: ```py >>> tokenizer.decode(outputs[0], skip_special_tokens=True) 'Les lignées partagent des ressources avec des bactéries enfixant l'azote.' ``` </pt> <tf> 텍스트를 토큰화하고 `input_ids`를 TensorFlow 텐서로 반환하세요: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("my_awesome_opus_books_model") >>> inputs = tokenizer(text, return_tensors="tf").input_ids ``` [`~transformers.generation_tf_utils.TFGenerationMixin.generate`] 메서드로 번역을 생성하세요. 다양한 텍스트 생성 전략 및 생성을 제어하기 위한 매개변수에 대한 자세한 내용은 [Text Generation](../main_classes/text_generation) API를 살펴보시기 바랍니다. ```py >>> from transformers import TFAutoModelForSeq2SeqLM >>> model = TFAutoModelForSeq2SeqLM.from_pretrained("my_awesome_opus_books_model") >>> outputs = model.generate(inputs, max_new_tokens=40, do_sample=True, top_k=30, top_p=0.95) ``` 생성된 토큰 ID들을 다시 텍스트로 디코딩하세요: ```py >>> tokenizer.decode(outputs[0], skip_special_tokens=True) 'Les lugumes partagent les ressources avec des bactéries fixatrices d'azote.' ``` </tf> </frameworkcontent>
transformers/docs/source/ko/tasks/translation.md/0
{ "file_path": "transformers/docs/source/ko/tasks/translation.md", "repo_id": "transformers", "token_count": 9737 }
287
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 使用 🤗 Tokenizers 中的分词器 [`PreTrainedTokenizerFast`] 依赖于 [🤗 Tokenizers](https://huggingface.co/docs/tokenizers) 库。从 🤗 Tokenizers 库获得的分词器可以被轻松地加载到 🤗 Transformers 中。 在了解具体内容之前,让我们先用几行代码创建一个虚拟的分词器: ```python >>> from tokenizers import Tokenizer >>> from tokenizers.models import BPE >>> from tokenizers.trainers import BpeTrainer >>> from tokenizers.pre_tokenizers import Whitespace >>> tokenizer = Tokenizer(BPE(unk_token="[UNK]")) >>> trainer = BpeTrainer(special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"]) >>> tokenizer.pre_tokenizer = Whitespace() >>> files = [...] >>> tokenizer.train(files, trainer) ``` 现在,我们拥有了一个针对我们定义的文件进行训练的分词器。我们可以在当前运行时中继续使用它,或者将其保存到一个 JSON 文件以供将来重复使用。 ## 直接从分词器对象加载 让我们看看如何利用 🤗 Transformers 库中的这个分词器对象。[`PreTrainedTokenizerFast`] 类允许通过接受已实例化的 *tokenizer* 对象作为参数,进行轻松实例化: ```python >>> from transformers import PreTrainedTokenizerFast >>> fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer) ``` 现在可以使用这个对象,使用 🤗 Transformers 分词器共享的所有方法!前往[分词器页面](main_classes/tokenizer)了解更多信息。 ## 从 JSON 文件加载 为了从 JSON 文件中加载分词器,让我们先保存我们的分词器: ```python >>> tokenizer.save("tokenizer.json") ``` 我们保存此文件的路径可以通过 `tokenizer_file` 参数传递给 [`PreTrainedTokenizerFast`] 初始化方法: ```python >>> from transformers import PreTrainedTokenizerFast >>> fast_tokenizer = PreTrainedTokenizerFast(tokenizer_file="tokenizer.json") ``` 现在可以使用这个对象,使用 🤗 Transformers 分词器共享的所有方法!前往[分词器页面](main_classes/tokenizer)了解更多信息。
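例如,加载后的 `fast_tokenizer` 可以像其他 🤗 Transformers 分词器一样直接对文本进行编码。下面是一个简单的示意(输入文本仅为演示用的假设示例,实际输出取决于你训练分词器所用的文件):

```python
>>> encoding = fast_tokenizer("Hello, how are you?")
>>> tokens = encoding.tokens()
>>> input_ids = encoding["input_ids"]
```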
transformers/docs/source/zh/fast_tokenizers.md/0
{ "file_path": "transformers/docs/source/zh/fast_tokenizers.md", "repo_id": "transformers", "token_count": 1249 }
288
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Callbacks Callbacks可以用来自定义PyTorch [Trainer]中训练循环行为的对象(此功能尚未在TensorFlow中实现),该对象可以检查训练循环状态(用于进度报告、在TensorBoard或其他ML平台上记录日志等),并做出决策(例如提前停止)。 Callbacks是“只读”的代码片段,除了它们返回的[TrainerControl]对象外,它们不能更改训练循环中的任何内容。对于需要更改训练循环的自定义,您应该继承[Trainer]并重载您需要的方法(有关示例,请参见[trainer](trainer))。 默认情况下,`TrainingArguments.report_to` 设置为"all",然后[Trainer]将使用以下callbacks。 - [`DefaultFlowCallback`],它处理默认的日志记录、保存和评估行为 - [`PrinterCallback`] 或 [`ProgressCallback`],用于显示进度和打印日志(如果通过[`TrainingArguments`]停用tqdm,则使用第一个函数;否则使用第二个)。 - [`~integrations.TensorBoardCallback`],如果TensorBoard可访问(通过PyTorch版本 >= 1.4 或者 tensorboardX)。 - [`~integrations.WandbCallback`],如果安装了[wandb](https://www.wandb.com/)。 - [`~integrations.CometCallback`],如果安装了[comet_ml](https://www.comet.ml/site/)。 - [`~integrations.MLflowCallback`],如果安装了[mlflow](https://www.mlflow.org/)。 - [`~integrations.NeptuneCallback`],如果安装了[neptune](https://neptune.ai/)。 - [`~integrations.AzureMLCallback`],如果安装了[azureml-sdk](https://pypi.org/project/azureml-sdk/)。 - [`~integrations.CodeCarbonCallback`],如果安装了[codecarbon](https://pypi.org/project/codecarbon/)。 - [`~integrations.ClearMLCallback`],如果安装了[clearml](https://github.com/allegroai/clearml)。 - [`~integrations.DagsHubCallback`],如果安装了[dagshub](https://dagshub.com/)。 - [`~integrations.FlyteCallback`],如果安装了[flyte](https://flyte.org/)。 - [`~integrations.DVCLiveCallback`],如果安装了[dvclive](https://dvc.org/doc/dvclive)。 如果安装了一个软件包,但您不希望使用相关的集成,您可以将 `TrainingArguments.report_to` 更改为仅包含您想要使用的集成的列表(例如 `["azure_ml", "wandb"]`)。 实现callbacks的主要类是[`TrainerCallback`]。它获取用于实例化[`Trainer`]的[`TrainingArguments`],可以通过[`TrainerState`]访问该Trainer的内部状态,并可以通过[`TrainerControl`]对训练循环执行一些操作。 ## 可用的Callbacks 这里是库里可用[`TrainerCallback`]的列表: [[autodoc]] integrations.CometCallback - setup [[autodoc]] DefaultFlowCallback [[autodoc]] PrinterCallback [[autodoc]] ProgressCallback [[autodoc]] EarlyStoppingCallback [[autodoc]] integrations.TensorBoardCallback [[autodoc]] integrations.WandbCallback - setup [[autodoc]] integrations.MLflowCallback - setup [[autodoc]] integrations.AzureMLCallback [[autodoc]] integrations.CodeCarbonCallback [[autodoc]] integrations.NeptuneCallback [[autodoc]] integrations.ClearMLCallback [[autodoc]] integrations.DagsHubCallback [[autodoc]] integrations.FlyteCallback [[autodoc]] integrations.DVCLiveCallback - setup ## TrainerCallback [[autodoc]] TrainerCallback 以下是如何使用PyTorch注册自定义callback的示例: [`Trainer`]: ```python class MyCallback(TrainerCallback): "A callback that prints a message at the beginning of training" def on_train_begin(self, args, state, control, **kwargs): print("Starting training") trainer = Trainer( model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=[MyCallback], # We can either pass the callback class this way or an instance of it (MyCallback()) ) ``` 
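除了读取训练状态之外,callback 还可以通过其返回的 [`TrainerControl`] 对象做出决策。下面是一个假设性的示意(并非库中的官方实现),展示如何在训练达到给定步数后提前停止:

```python
class StopAfterNStepsCallback(TrainerCallback):
    "A hypothetical callback that stops training after a fixed number of steps"

    def __init__(self, max_steps=1000):
        self.max_steps = max_steps

    def on_step_end(self, args, state, control, **kwargs):
        # state.global_step 是当前已完成的优化步数(只读)
        if state.global_step >= self.max_steps:
            # 通过 TrainerControl 请求提前结束训练
            control.should_training_stop = True
        return control
```

这个 callback 可以像上面的 `MyCallback` 一样,通过 `callbacks` 参数传递给 [`Trainer`]。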
注册callback的另一种方式是调用 `trainer.add_callback()`,如下所示: ```python trainer = Trainer(...) trainer.add_callback(MyCallback) # Alternatively, we can pass an instance of the callback class trainer.add_callback(MyCallback()) ``` ## TrainerState [[autodoc]] TrainerState ## TrainerControl [[autodoc]] TrainerControl
transformers/docs/source/zh/main_classes/callback.md/0
{ "file_path": "transformers/docs/source/zh/main_classes/callback.md", "repo_id": "transformers", "token_count": 2183 }
289
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 导出为 TFLite [TensorFlow Lite](https://www.tensorflow.org/lite/guide) 是一个轻量级框架,用于资源受限的设备上,如手机、嵌入式系统和物联网(IoT)设备,部署机器学习模型。TFLite 旨在在计算能力、内存和功耗有限的设备上优化和高效运行模型。模型以一种特殊的高效可移植格式表示,其文件扩展名为 `.tflite`。 🤗 Optimum 通过 `exporters.tflite` 模块提供将 🤗 Transformers 模型导出至 TFLite 格式的功能。请参考 [🤗 Optimum 文档](https://huggingface.co/docs/optimum/exporters/tflite/overview) 以获取支持的模型架构列表。 要将模型导出为 TFLite 格式,请安装所需的依赖项: ```bash pip install optimum[exporters-tf] ``` 请参阅 [🤗 Optimum 文档](https://huggingface.co/docs/optimum/main/en/exporters/tflite/usage_guides/export_a_model) 以查看所有可用参数,或者在命令行中查看帮助: ```bash optimum-cli export tflite --help ``` 运行以下命令,以从 🤗 Hub 导出模型的检查点(checkpoint),以 `google-bert/bert-base-uncased` 为例: ```bash optimum-cli export tflite --model google-bert/bert-base-uncased --sequence_length 128 bert_tflite/ ``` 你应该能在日志中看到导出进度以及生成的 `model.tflite` 文件的保存位置,如下所示: ```bash Validating TFLite model... -[✓] TFLite model output names match reference model (logits) - Validating TFLite Model output "logits": -[✓] (1, 128, 30522) matches (1, 128, 30522) -[x] values not close enough, max diff: 5.817413330078125e-05 (atol: 1e-05) The TensorFlow Lite export succeeded with the warning: The maximum absolute difference between the output of the reference model and the TFLite exported model is not within the set tolerance 1e-05: - logits: max diff = 5.817413330078125e-05. The exported model was saved at: bert_tflite ``` 上面的示例说明了从 🤗 Hub 导出检查点的过程。导出本地模型时,首先需要确保将模型的权重和分词器文件保存在同一目录(`local_path`)中。在使用 CLI(命令行)时,将 `local_path` 传递给 `model` 参数,而不是 🤗 Hub 上的检查点名称。
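导出完成后,可以用 TensorFlow Lite 解释器加载生成的 `model.tflite` 文件,检查其输入输出张量。下面是一个简单的示意(假设已安装 `tensorflow`,且文件路径与上文示例一致):

```python
import tensorflow as tf

# 加载导出的 TFLite 模型并分配张量
interpreter = tf.lite.Interpreter(model_path="bert_tflite/model.tflite")
interpreter.allocate_tensors()

# 查看模型期望的输入张量与输出张量信息
print(interpreter.get_input_details())
print(interpreter.get_output_details())
```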
transformers/docs/source/zh/tflite.md/0
{ "file_path": "transformers/docs/source/zh/tflite.md", "repo_id": "transformers", "token_count": 1386 }
290
#!/usr/bin/env python # coding=utf-8 # Copyright 2021 The HuggingFace Team All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for masked language modeling (BERT, ALBERT, RoBERTa...) with whole word masking on a text file or a dataset. Here is the full list of checkpoints on the hub that can be fine-tuned by this script: https://huggingface.co/models?filter=fill-mask """ import json import logging import math import os import sys import time import warnings from dataclasses import asdict, dataclass, field from enum import Enum from itertools import chain # You can also adapt this script on your own masked language modeling task. Pointers for this are left as comments. from pathlib import Path from typing import Dict, List, Optional, Tuple import flax import jax import jax.numpy as jnp import numpy as np import optax from datasets import load_dataset from flax import jax_utils, traverse_util from flax.jax_utils import pad_shard_unpad from flax.training import train_state from flax.training.common_utils import get_metrics, onehot, shard from huggingface_hub import HfApi from tqdm import tqdm from transformers import ( CONFIG_MAPPING, FLAX_MODEL_FOR_MASKED_LM_MAPPING, AutoConfig, AutoTokenizer, FlaxAutoModelForMaskedLM, HfArgumentParser, PreTrainedTokenizerBase, TensorType, is_tensorboard_available, set_seed, ) from transformers.utils import send_example_telemetry MODEL_CONFIG_CLASSES = list(FLAX_MODEL_FOR_MASKED_LM_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class TrainingArguments: output_dir: str = field( metadata={"help": "The output directory where the model predictions and checkpoints will be written."}, ) overwrite_output_dir: bool = field( default=False, metadata={ "help": ( "Overwrite the content of the output directory. " "Use this to continue training if output_dir points to a checkpoint directory." 
) }, ) do_train: bool = field(default=False, metadata={"help": "Whether to run training."}) do_eval: bool = field(default=False, metadata={"help": "Whether to run eval on the dev set."}) per_device_train_batch_size: int = field( default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for training."} ) per_device_eval_batch_size: int = field( default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for evaluation."} ) learning_rate: float = field(default=5e-5, metadata={"help": "The initial learning rate for AdamW."}) weight_decay: float = field(default=0.0, metadata={"help": "Weight decay for AdamW if we apply some."}) adam_beta1: float = field(default=0.9, metadata={"help": "Beta1 for AdamW optimizer"}) adam_beta2: float = field(default=0.999, metadata={"help": "Beta2 for AdamW optimizer"}) adam_epsilon: float = field(default=1e-8, metadata={"help": "Epsilon for AdamW optimizer."}) adafactor: bool = field(default=False, metadata={"help": "Whether or not to replace AdamW by Adafactor."}) num_train_epochs: float = field(default=3.0, metadata={"help": "Total number of training epochs to perform."}) warmup_steps: int = field(default=0, metadata={"help": "Linear warmup over warmup_steps."}) logging_steps: int = field(default=500, metadata={"help": "Log every X updates steps."}) save_steps: int = field(default=500, metadata={"help": "Save checkpoint every X updates steps."}) eval_steps: int = field(default=None, metadata={"help": "Run an evaluation every X steps."}) seed: int = field(default=42, metadata={"help": "Random seed that will be set at the beginning of training."}) push_to_hub: bool = field( default=False, metadata={"help": "Whether or not to upload the trained model to the model hub after training."} ) hub_model_id: str = field( default=None, metadata={"help": "The name of the repository to keep in sync with the local `output_dir`."} ) hub_token: str = field(default=None, metadata={"help": "The token to use to push to the Model Hub."}) gradient_checkpointing: bool = field( default=False, metadata={ "help": "If True, use gradient checkpointing to save memory at the expense of slower backward pass." }, ) def __post_init__(self): if self.output_dir is not None: self.output_dir = os.path.expanduser(self.output_dir) def to_dict(self): """ Serializes this instance while replace `Enum` by their values (for JSON serialization support). It obfuscates the token values by removing their value. """ d = asdict(self) for k, v in d.items(): if isinstance(v, Enum): d[k] = v.value if isinstance(v, list) and len(v) > 0 and isinstance(v[0], Enum): d[k] = [x.value for x in v] if k.endswith("_token"): d[k] = f"<{k.upper()}>" return d @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. """ model_name_or_path: Optional[str] = field( default=None, metadata={ "help": ( "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch." 
) }, ) model_type: Optional[str] = field( default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) dtype: Optional[str] = field( default="float32", metadata={ "help": ( "Floating-point format in which the model weights should be initialized and trained. Choose one of" " `[float32, float16, bfloat16]`." ) }, ) token: str = field( default=None, metadata={ "help": ( "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token " "generated when running `huggingface-cli login` (stored in `~/.huggingface`)." ) }, ) use_auth_token: bool = field( default=None, metadata={ "help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token` instead." }, ) trust_remote_code: bool = field( default=False, metadata={ "help": ( "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option " "should only be set to `True` for repositories you trust and in which you have read the code, as it will " "execute code present on the Hub on your local machine." ) }, ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, ) train_ref_file: Optional[str] = field( default=None, metadata={"help": "An optional input train ref data file for whole word masking in Chinese."}, ) validation_ref_file: Optional[str] = field( default=None, metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."}, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) validation_split_percentage: Optional[int] = field( default=5, metadata={ "help": "The percentage of the train set used as validation set in case there's no validation split" }, ) max_seq_length: Optional[int] = field( default=None, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated. Default to the max input length of the model." 
) }, ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) mlm_probability: float = field( default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} ) pad_to_max_length: bool = field( default=False, metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) }, ) line_by_line: bool = field( default=False, metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."}, ) def __post_init__(self): if self.dataset_name is None and self.train_file is None and self.validation_file is None: raise ValueError("Need either a dataset name or a training/validation file.") else: if self.train_file is not None: extension = self.train_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." @flax.struct.dataclass class FlaxDataCollatorForLanguageModeling: """ Data collator used for language modeling. Inputs are dynamically padded to the maximum length of a batch if they are not all of the same length. Args: tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`): The tokenizer used for encoding the data. mlm_probability (:obj:`float`, `optional`, defaults to 0.15): The probability with which to (randomly) mask tokens in the input. .. note:: For best performance, this data collator should be used with a dataset having items that are dictionaries or BatchEncoding, with the :obj:`"special_tokens_mask"` key, as returned by a :class:`~transformers.PreTrainedTokenizer` or a :class:`~transformers.PreTrainedTokenizerFast` with the argument :obj:`return_special_tokens_mask=True`. """ tokenizer: PreTrainedTokenizerBase mlm_probability: float = 0.15 def __post_init__(self): if self.tokenizer.mask_token is None: raise ValueError( "This tokenizer does not have a mask token which is necessary for masked language modeling. " "You should pass `mlm=False` to train on causal language modeling instead." ) def __call__(self, examples: List[Dict[str, np.ndarray]], pad_to_multiple_of: int) -> Dict[str, np.ndarray]: # Handle dict or lists with proper padding and conversion to tensor. batch = self.tokenizer.pad(examples, pad_to_multiple_of=pad_to_multiple_of, return_tensors=TensorType.NUMPY) # If special token mask has been preprocessed, pop it from the dict. special_tokens_mask = batch.pop("special_tokens_mask", None) batch["input_ids"], batch["labels"] = self.mask_tokens( batch["input_ids"], special_tokens_mask=special_tokens_mask ) return batch def mask_tokens( self, inputs: np.ndarray, special_tokens_mask: Optional[np.ndarray] ) -> Tuple[np.ndarray, np.ndarray]: """ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. 
""" labels = inputs.copy() # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`) probability_matrix = np.full(labels.shape, self.mlm_probability) special_tokens_mask = special_tokens_mask.astype("bool") probability_matrix[special_tokens_mask] = 0.0 masked_indices = np.random.binomial(1, probability_matrix).astype("bool") labels[~masked_indices] = -100 # We only compute loss on masked tokens # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]) indices_replaced = np.random.binomial(1, np.full(labels.shape, 0.8)).astype("bool") & masked_indices inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token) # 10% of the time, we replace masked input tokens with random word indices_random = np.random.binomial(1, np.full(labels.shape, 0.5)).astype("bool") indices_random &= masked_indices & ~indices_replaced random_words = np.random.randint(self.tokenizer.vocab_size, size=labels.shape, dtype="i4") inputs[indices_random] = random_words[indices_random] # The rest of the time (10% of the time) we keep the masked input tokens unchanged return inputs, labels def generate_batch_splits(samples_idx: np.ndarray, batch_size: int, drop_last=True) -> np.ndarray: """Generate batches of data for a specified batch size from sample indices. If the dataset size is not divisible by the batch size and `drop_last` is `True`, the last incomplete batch is dropped. Else, it is returned.""" num_samples = len(samples_idx) if drop_last: samples_to_remove = num_samples % batch_size if samples_to_remove != 0: samples_idx = samples_idx[:-samples_to_remove] sections_split = num_samples // batch_size samples_idx = samples_idx.reshape((sections_split, batch_size)) else: sections_split = math.ceil(num_samples / batch_size) samples_idx = np.array_split(samples_idx, sections_split) return samples_idx def write_train_metric(summary_writer, train_metrics, train_time, step): summary_writer.scalar("train_time", train_time, step) train_metrics = get_metrics(train_metrics) for key, vals in train_metrics.items(): tag = f"train_{key}" for i, val in enumerate(vals): summary_writer.scalar(tag, val, step - len(vals) + i + 1) def write_eval_metric(summary_writer, eval_metrics, step): for metric_name, value in eval_metrics.items(): summary_writer.scalar(f"eval_{metric_name}", value, step) def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() if model_args.use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token` instead.", FutureWarning, ) if model_args.token is not None: raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") model_args.token = model_args.use_auth_token # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. 
The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_mlm", model_args, data_args, framework="flax") if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", level=logging.INFO, datefmt="[%X]", ) # Log on each process the small summary: logger = logging.getLogger(__name__) # Set the verbosity to info of the Transformers logger (on main process only): logger.info(f"Training/evaluation parameters {training_args}") # Set seed before initializing model. set_seed(training_args.seed) # Handle the repository creation if training_args.push_to_hub: # Retrieve of infer repo_name repo_name = training_args.hub_model_id if repo_name is None: repo_name = Path(training_args.output_dir).absolute().name # Create repo and retrieve repo_id api = HfApi() repo_id = api.create_repo(repo_name, exist_ok=True, token=training_args.hub_token).repo_id # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. 
datasets = load_dataset( data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, token=model_args.token, num_proc=data_args.preprocessing_num_workers, ) if "validation" not in datasets.keys(): datasets["validation"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f"train[:{data_args.validation_split_percentage}%]", cache_dir=model_args.cache_dir, token=model_args.token, num_proc=data_args.preprocessing_num_workers, ) datasets["train"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f"train[{data_args.validation_split_percentage}%:]", cache_dir=model_args.cache_dir, token=model_args.token, num_proc=data_args.preprocessing_num_workers, ) else: data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file extension = data_args.train_file.split(".")[-1] if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = data_args.validation_file.split(".")[-1] if extension == "txt": extension = "text" datasets = load_dataset( extension, data_files=data_files, cache_dir=model_args.cache_dir, token=model_args.token, num_proc=data_args.preprocessing_num_workers, ) if "validation" not in datasets.keys(): datasets["validation"] = load_dataset( extension, data_files=data_files, split=f"train[:{data_args.validation_split_percentage}%]", cache_dir=model_args.cache_dir, token=model_args.token, num_proc=data_args.preprocessing_num_workers, ) datasets["train"] = load_dataset( extension, data_files=data_files, split=f"train[{data_args.validation_split_percentage}%:]", cache_dir=model_args.cache_dir, token=model_args.token, num_proc=data_args.preprocessing_num_workers, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. if model_args.config_name: config = AutoConfig.from_pretrained( model_args.config_name, cache_dir=model_args.cache_dir, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) elif model_args.model_name_or_path: config = AutoConfig.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) else: config = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") if model_args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) elif model_args.model_name_or_path: tokenizer = AutoTokenizer.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script. " "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) # Preprocessing the datasets. # First we tokenize all the texts. 
if training_args.do_train: column_names = datasets["train"].column_names else: column_names = datasets["validation"].column_names text_column_name = "text" if "text" in column_names else column_names[0] max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) if data_args.line_by_line: # When using line_by_line, we just tokenize each nonempty line. padding = "max_length" if data_args.pad_to_max_length else False def tokenize_function(examples): # Remove empty lines examples = [line for line in examples if len(line) > 0 and not line.isspace()] return tokenizer( examples, return_special_tokens_mask=True, padding=padding, truncation=True, max_length=max_seq_length, ) tokenized_datasets = datasets.map( tokenize_function, input_columns=[text_column_name], batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, ) else: # Otherwise, we tokenize every text, then concatenate them together before splitting them in smaller parts. # We use `return_special_tokens_mask=True` because DataCollatorForLanguageModeling (see below) is more # efficient when it receives the `special_tokens_mask`. def tokenize_function(examples): return tokenizer(examples[text_column_name], return_special_tokens_mask=True) tokenized_datasets = datasets.map( tokenize_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, ) # Main data processing function that will concatenate all texts from our dataset and generate chunks of # max_seq_length. def group_texts(examples): # Concatenate all texts. concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()} total_length = len(concatenated_examples[list(examples.keys())[0]]) # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can # customize this part to your needs. if total_length >= max_seq_length: total_length = (total_length // max_seq_length) * max_seq_length # Split by chunks of max_len. result = { k: [t[i : i + max_seq_length] for i in range(0, total_length, max_seq_length)] for k, t in concatenated_examples.items() } return result # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a # remainder for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value # might be slower to preprocess. # # To speed up this part, we use multiprocessing. See the documentation of the map method for more information: # https://huggingface.co/docs/datasets/process#map tokenized_datasets = tokenized_datasets.map( group_texts, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, ) # Enable tensorboard only on the master node has_tensorboard = is_tensorboard_available() if has_tensorboard and jax.process_index() == 0: try: from flax.metrics.tensorboard import SummaryWriter summary_writer = SummaryWriter(log_dir=Path(training_args.output_dir)) except ImportError as ie: has_tensorboard = False logger.warning( f"Unable to display metrics through TensorBoard because some package are not installed: {ie}" ) else: logger.warning( "Unable to display metrics through TensorBoard because the package is not installed: " "Please run pip install tensorboard to enable." ) # Data collator # This one will take care of randomly masking the tokens. 
data_collator = FlaxDataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability) # Initialize our training rng = jax.random.PRNGKey(training_args.seed) dropout_rngs = jax.random.split(rng, jax.local_device_count()) if model_args.model_name_or_path: model = FlaxAutoModelForMaskedLM.from_pretrained( model_args.model_name_or_path, config=config, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype), token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) else: model = FlaxAutoModelForMaskedLM.from_config( config, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype), trust_remote_code=model_args.trust_remote_code, ) if training_args.gradient_checkpointing: model.enable_gradient_checkpointing() # Store some constant num_epochs = int(training_args.num_train_epochs) train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count() per_device_eval_batch_size = int(training_args.per_device_eval_batch_size) eval_batch_size = per_device_eval_batch_size * jax.device_count() num_train_steps = len(tokenized_datasets["train"]) // train_batch_size * num_epochs # Create learning rate schedule warmup_fn = optax.linear_schedule( init_value=0.0, end_value=training_args.learning_rate, transition_steps=training_args.warmup_steps ) decay_fn = optax.linear_schedule( init_value=training_args.learning_rate, end_value=0, transition_steps=num_train_steps - training_args.warmup_steps, ) linear_decay_lr_schedule_fn = optax.join_schedules( schedules=[warmup_fn, decay_fn], boundaries=[training_args.warmup_steps] ) # We use Optax's "masking" functionality to not apply weight decay # to bias and LayerNorm scale parameters. decay_mask_fn returns a # mask boolean with the same structure as the parameters. # The mask is True for parameters that should be decayed. 
def decay_mask_fn(params): flat_params = traverse_util.flatten_dict(params) # find out all LayerNorm parameters layer_norm_candidates = ["layernorm", "layer_norm", "ln"] layer_norm_named_params = { layer[-2:] for layer_norm_name in layer_norm_candidates for layer in flat_params.keys() if layer_norm_name in "".join(layer).lower() } flat_mask = {path: (path[-1] != "bias" and path[-2:] not in layer_norm_named_params) for path in flat_params} return traverse_util.unflatten_dict(flat_mask) # create adam optimizer if training_args.adafactor: # We use the default parameters here to initialize adafactor, # For more details about the parameters please check https://github.com/deepmind/optax/blob/ed02befef9bf81cbbf236be3d2b0e032e9ed4a40/optax/_src/alias.py#L74 optimizer = optax.adafactor( learning_rate=linear_decay_lr_schedule_fn, ) else: optimizer = optax.adamw( learning_rate=linear_decay_lr_schedule_fn, b1=training_args.adam_beta1, b2=training_args.adam_beta2, eps=training_args.adam_epsilon, weight_decay=training_args.weight_decay, mask=decay_mask_fn, ) # Setup train state state = train_state.TrainState.create(apply_fn=model.__call__, params=model.params, tx=optimizer) # Define gradient update step fn def train_step(state, batch, dropout_rng): dropout_rng, new_dropout_rng = jax.random.split(dropout_rng) def loss_fn(params): labels = batch.pop("labels") logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0] # compute loss, ignore padded input tokens label_mask = jnp.where(labels > 0, 1.0, 0.0) loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])) * label_mask # take average loss = loss.sum() num_labels = label_mask.sum() return loss, num_labels grad_fn = jax.value_and_grad(loss_fn, has_aux=True) (loss, num_labels), grad = grad_fn(state.params) num_labels = jax.lax.psum(num_labels, "batch") # true loss = total loss / total samples loss = jax.lax.psum(loss, "batch") loss = jax.tree_util.tree_map(lambda x: x / num_labels, loss) # true grad = total grad / total samples grad = jax.lax.psum(grad, "batch") grad = jax.tree_util.tree_map(lambda x: x / num_labels, grad) new_state = state.apply_gradients(grads=grad) metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)} return new_state, metrics, new_dropout_rng # Create parallel version of the train step p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,)) # Define eval fn def eval_step(params, batch): labels = batch.pop("labels") logits = model(**batch, params=params, train=False)[0] # compute loss, ignore padded input tokens label_mask = jnp.where(labels > 0, 1.0, 0.0) loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])) * label_mask # compute accuracy accuracy = jnp.equal(jnp.argmax(logits, axis=-1), labels) * label_mask # summarize metrics metrics = {"loss": loss.sum(), "accuracy": accuracy.sum(), "normalizer": label_mask.sum()} metrics = jax.lax.psum(metrics, axis_name="batch") return metrics p_eval_step = jax.pmap(eval_step, "batch", donate_argnums=(0,)) # Replicate the train state on each device state = jax_utils.replicate(state) train_time = 0 epochs = tqdm(range(num_epochs), desc=f"Epoch ... 
(1/{num_epochs})", position=0) for epoch in epochs: # ======================== Training ================================ train_start = time.time() train_metrics = [] # Create sampling rng rng, input_rng = jax.random.split(rng) # Generate an epoch by shuffling sampling indices from the train dataset num_train_samples = len(tokenized_datasets["train"]) # Avoid using jax.numpy here in case of TPU training train_samples_idx = np.random.permutation(np.arange(num_train_samples)) train_batch_idx = generate_batch_splits(train_samples_idx, train_batch_size) # Gather the indexes for creating the batch and do a training step for step, batch_idx in enumerate(tqdm(train_batch_idx, desc="Training...", position=1)): samples = [tokenized_datasets["train"][int(idx)] for idx in batch_idx] model_inputs = data_collator(samples, pad_to_multiple_of=16) # Model forward model_inputs = shard(model_inputs.data) state, train_metric, dropout_rngs = p_train_step(state, model_inputs, dropout_rngs) train_metrics.append(train_metric) cur_step = epoch * (num_train_samples // train_batch_size) + step if cur_step % training_args.logging_steps == 0 and cur_step > 0: # Save metrics train_metric = jax_utils.unreplicate(train_metric) train_time += time.time() - train_start if has_tensorboard and jax.process_index() == 0: write_train_metric(summary_writer, train_metrics, train_time, cur_step) epochs.write( f"Step... ({cur_step} | Loss: {train_metric['loss']}, Learning Rate:" f" {train_metric['learning_rate']})" ) train_metrics = [] if cur_step % training_args.eval_steps == 0 and cur_step > 0: # ======================== Evaluating ============================== num_eval_samples = len(tokenized_datasets["validation"]) # Avoid using jax.numpy here in case of TPU training eval_samples_idx = np.arange(num_eval_samples) eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size, drop_last=False) eval_metrics = [] for i, batch_idx in enumerate(tqdm(eval_batch_idx, desc="Evaluating ...", position=2)): samples = [tokenized_datasets["validation"][int(idx)] for idx in batch_idx] model_inputs = data_collator(samples, pad_to_multiple_of=16) # Model forward metrics = pad_shard_unpad(p_eval_step, static_return=True)( state.params, model_inputs.data, min_device_batch=per_device_eval_batch_size ) eval_metrics.append(metrics) # normalize eval metrics eval_metrics = get_metrics(eval_metrics) eval_metrics = jax.tree_util.tree_map(jnp.sum, eval_metrics) eval_normalizer = eval_metrics.pop("normalizer") eval_metrics = jax.tree_util.tree_map(lambda x: x / eval_normalizer, eval_metrics) # Update progress bar epochs.desc = f"Step... 
({cur_step} | Loss: {eval_metrics['loss']}, Acc: {eval_metrics['accuracy']})" # Save metrics if has_tensorboard and jax.process_index() == 0: write_eval_metric(summary_writer, eval_metrics, cur_step) if cur_step % training_args.save_steps == 0 and cur_step > 0: # save checkpoint after each epoch and push checkpoint to the hub if jax.process_index() == 0: params = jax.device_get(jax.tree_util.tree_map(lambda x: x[0], state.params)) model.save_pretrained(training_args.output_dir, params=params) tokenizer.save_pretrained(training_args.output_dir) if training_args.push_to_hub: api.upload_folder( commit_message=f"Saving weights and logs of step {cur_step}", folder_path=training_args.output_dir, repo_id=repo_id, repo_type="model", token=training_args.hub_token, ) # Eval after training if training_args.do_eval: num_eval_samples = len(tokenized_datasets["validation"]) # Avoid using jax.numpy here in case of TPU training eval_samples_idx = np.arange(num_eval_samples) eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size, drop_last=False) eval_metrics = [] for _, batch_idx in enumerate(tqdm(eval_batch_idx, desc="Evaluating ...", position=2)): samples = [tokenized_datasets["validation"][int(idx)] for idx in batch_idx] model_inputs = data_collator(samples, pad_to_multiple_of=16) # Model forward metrics = pad_shard_unpad(p_eval_step, static_return=True)( state.params, model_inputs.data, min_device_batch=per_device_eval_batch_size ) eval_metrics.append(metrics) # normalize eval metrics eval_metrics = get_metrics(eval_metrics) eval_metrics = jax.tree_util.tree_map(lambda metric: jnp.sum(metric).item(), eval_metrics) eval_normalizer = eval_metrics.pop("normalizer") eval_metrics = jax.tree_util.tree_map(lambda x: x / eval_normalizer, eval_metrics) try: perplexity = math.exp(eval_metrics["loss"]) except OverflowError: perplexity = float("inf") eval_metrics["perplexity"] = perplexity if jax.process_index() == 0: eval_metrics = {f"eval_{metric_name}": value for metric_name, value in eval_metrics.items()} path = os.path.join(training_args.output_dir, "eval_results.json") with open(path, "w") as f: json.dump(eval_metrics, f, indent=4, sort_keys=True) if __name__ == "__main__": main()
transformers/examples/flax/language-modeling/run_mlm_flax.py/0
{ "file_path": "transformers/examples/flax/language-modeling/run_mlm_flax.py", "repo_id": "transformers", "token_count": 17238 }
291
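For readers skimming the collator in run_mlm_flax.py above, here is a small, self-contained NumPy sketch of the 80/10/10 masking scheme implemented by FlaxDataCollatorForLanguageModeling.mask_tokens. It is illustrative only: the vocabulary size, mask token id and input ids below are made-up values, not taken from any real tokenizer.

import numpy as np

np.random.seed(0)
vocab_size, mask_token_id, mlm_probability = 1000, 103, 0.15
inputs = np.random.randint(5, vocab_size, size=(2, 16), dtype="i4")
labels = inputs.copy()

# sample positions to mask; loss is only computed on masked positions
masked_indices = np.random.binomial(1, np.full(labels.shape, mlm_probability)).astype("bool")
labels[~masked_indices] = -100

# 80% of masked positions become the mask token
indices_replaced = np.random.binomial(1, np.full(labels.shape, 0.8)).astype("bool") & masked_indices
inputs[indices_replaced] = mask_token_id

# half of the remaining masked positions (10% overall) become a random token; the rest stay unchanged
indices_random = np.random.binomial(1, np.full(labels.shape, 0.5)).astype("bool") & masked_indices & ~indices_replaced
inputs[indices_random] = np.random.randint(vocab_size, size=labels.shape, dtype="i4")[indices_random]

print(inputs)
print(labels)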
#!/usr/bin/env python # coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Finetuning a 🤗 Flax Transformers model for sequence classification on GLUE.""" import json import logging import math import os import random import sys import time import warnings from dataclasses import dataclass, field from pathlib import Path from typing import Any, Callable, Dict, Optional, Tuple import datasets import evaluate import jax import jax.numpy as jnp import numpy as np import optax from datasets import load_dataset from flax import struct, traverse_util from flax.jax_utils import pad_shard_unpad, replicate, unreplicate from flax.training import train_state from flax.training.common_utils import get_metrics, onehot, shard from huggingface_hub import HfApi from tqdm import tqdm import transformers from transformers import ( AutoConfig, AutoTokenizer, FlaxAutoModelForSequenceClassification, HfArgumentParser, PretrainedConfig, TrainingArguments, is_tensorboard_available, ) from transformers.utils import check_min_version, send_example_telemetry logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.40.0.dev0") Array = Any Dataset = datasets.arrow_dataset.Dataset PRNGKey = Any task_to_keys = { "cola": ("sentence", None), "mnli": ("premise", "hypothesis"), "mrpc": ("sentence1", "sentence2"), "qnli": ("question", "sentence"), "qqp": ("question1", "question2"), "rte": ("sentence1", "sentence2"), "sst2": ("sentence", None), "stsb": ("sentence1", "sentence2"), "wnli": ("sentence1", "sentence2"), } @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) use_slow_tokenizer: Optional[bool] = field( default=False, metadata={"help": "If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library)."}, ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) token: str = field( default=None, metadata={ "help": ( "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token " "generated when running `huggingface-cli login` (stored in `~/.huggingface`)." ) }, ) use_auth_token: bool = field( default=None, metadata={ "help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. 
Please use `token` instead." }, ) trust_remote_code: bool = field( default=False, metadata={ "help": ( "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option " "should only be set to `True` for repositories you trust and in which you have read the code, as it will " "execute code present on the Hub on your local machine." ) }, ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ task_name: Optional[str] = field( default=None, metadata={"help": f"The name of the glue task to train on. choices {list(task_to_keys.keys())}"} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field( default=None, metadata={"help": "The input training data file (a csv or JSON file)."} ) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate on (a csv or JSON file)."}, ) test_file: Optional[str] = field( default=None, metadata={"help": "An optional input test data file to predict on (a csv or JSON file)."}, ) text_column_name: Optional[str] = field( default=None, metadata={"help": "The column name of text to input in the file (a csv or JSON file)."} ) label_column_name: Optional[str] = field( default=None, metadata={"help": "The column name of label to input in the file (a csv or JSON file)."} ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) max_seq_length: int = field( default=None, metadata={ "help": ( "The maximum total input sequence length after tokenization. If set, sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) max_predict_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." ) }, ) def __post_init__(self): if self.task_name is None and self.train_file is None and self.validation_file is None: raise ValueError("Need either a dataset name or a training/validation file.") else: if self.train_file is not None: extension = self.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." self.task_name = self.task_name.lower() if isinstance(self.task_name, str) else self.task_name def create_train_state( model: FlaxAutoModelForSequenceClassification, learning_rate_fn: Callable[[int], float], is_regression: bool, num_labels: int, weight_decay: float, ) -> train_state.TrainState: """Create initial training state.""" class TrainState(train_state.TrainState): """Train state with an Optax optimizer. 
The two functions below differ depending on whether the task is classification or regression. Args: logits_fn: Applied to last layer to obtain the logits. loss_fn: Function to compute the loss. """ logits_fn: Callable = struct.field(pytree_node=False) loss_fn: Callable = struct.field(pytree_node=False) # We use Optax's "masking" functionality to not apply weight decay # to bias and LayerNorm scale parameters. decay_mask_fn returns a # mask boolean with the same structure as the parameters. # The mask is True for parameters that should be decayed. def decay_mask_fn(params): flat_params = traverse_util.flatten_dict(params) # find out all LayerNorm parameters layer_norm_candidates = ["layernorm", "layer_norm", "ln"] layer_norm_named_params = { layer[-2:] for layer_norm_name in layer_norm_candidates for layer in flat_params.keys() if layer_norm_name in "".join(layer).lower() } flat_mask = {path: (path[-1] != "bias" and path[-2:] not in layer_norm_named_params) for path in flat_params} return traverse_util.unflatten_dict(flat_mask) tx = optax.adamw( learning_rate=learning_rate_fn, b1=0.9, b2=0.999, eps=1e-6, weight_decay=weight_decay, mask=decay_mask_fn ) if is_regression: def mse_loss(logits, labels): return jnp.mean((logits[..., 0] - labels) ** 2) return TrainState.create( apply_fn=model.__call__, params=model.params, tx=tx, logits_fn=lambda logits: logits[..., 0], loss_fn=mse_loss, ) else: # Classification. def cross_entropy_loss(logits, labels): xentropy = optax.softmax_cross_entropy(logits, onehot(labels, num_classes=num_labels)) return jnp.mean(xentropy) return TrainState.create( apply_fn=model.__call__, params=model.params, tx=tx, logits_fn=lambda logits: logits.argmax(-1), loss_fn=cross_entropy_loss, ) def create_learning_rate_fn( train_ds_size: int, train_batch_size: int, num_train_epochs: int, num_warmup_steps: int, learning_rate: float ) -> Callable[[int], jnp.ndarray]: """Returns a linear warmup, linear_decay learning rate function.""" steps_per_epoch = train_ds_size // train_batch_size num_train_steps = steps_per_epoch * num_train_epochs warmup_fn = optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps) decay_fn = optax.linear_schedule( init_value=learning_rate, end_value=0, transition_steps=num_train_steps - num_warmup_steps ) schedule_fn = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[num_warmup_steps]) return schedule_fn def glue_train_data_collator(rng: PRNGKey, dataset: Dataset, batch_size: int): """Returns shuffled batches of size `batch_size` from truncated `train dataset`, sharded over all local devices.""" steps_per_epoch = len(dataset) // batch_size perms = jax.random.permutation(rng, len(dataset)) perms = perms[: steps_per_epoch * batch_size] # Skip incomplete batch. perms = perms.reshape((steps_per_epoch, batch_size)) for perm in perms: batch = dataset[perm] batch = {k: np.array(v) for k, v in batch.items()} batch = shard(batch) yield batch def glue_eval_data_collator(dataset: Dataset, batch_size: int): """Returns batches of size `batch_size` from `eval dataset`. 
Sharding handled by `pad_shard_unpad` in the eval loop.""" batch_idx = np.arange(len(dataset)) steps_per_epoch = math.ceil(len(dataset) / batch_size) batch_idx = np.array_split(batch_idx, steps_per_epoch) for idx in batch_idx: batch = dataset[idx] batch = {k: np.array(v) for k, v in batch.items()} yield batch def main(): parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() if model_args.use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token` instead.", FutureWarning, ) if model_args.token is not None: raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") model_args.token = model_args.use_auth_token # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_glue", model_args, data_args, framework="flax") # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR) if jax.process_index() == 0: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # Handle the repository creation if training_args.push_to_hub: # Retrieve of infer repo_name repo_name = training_args.hub_model_id if repo_name is None: repo_name = Path(training_args.output_dir).absolute().name # Create repo and retrieve repo_id api = HfApi() repo_id = api.create_repo(repo_name, exist_ok=True, token=training_args.hub_token).repo_id # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the # sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named # label if at least two columns are provided. # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.task_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset( "glue", data_args.task_name, token=model_args.token, ) else: # Loading the dataset from local csv or json file. 
data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = (data_args.train_file if data_args.train_file is not None else data_args.valid_file).split(".")[-1] raw_datasets = load_dataset( extension, data_files=data_files, token=model_args.token, ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets. # Labels if data_args.task_name is not None: is_regression = data_args.task_name == "stsb" if not is_regression: label_list = raw_datasets["train"].features["label"].names num_labels = len(label_list) else: num_labels = 1 else: # Trying to have good defaults here, don't hesitate to tweak to your needs. is_regression = raw_datasets["train"].features["label"].dtype in ["float32", "float64"] if is_regression: num_labels = 1 else: # A useful fast method: # https://huggingface.co/docs/datasets/package_reference/main_classes#datasets.Dataset.unique label_list = raw_datasets["train"].unique("label") label_list.sort() # Let's sort it for determinism num_labels = len(label_list) # Load pretrained model and tokenizer config = AutoConfig.from_pretrained( model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) tokenizer = AutoTokenizer.from_pretrained( model_args.model_name_or_path, use_fast=not model_args.use_slow_tokenizer, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) model = FlaxAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path, config=config, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) # Preprocessing the datasets if data_args.task_name is not None: sentence1_key, sentence2_key = task_to_keys[data_args.task_name] else: # Again, we try to have some nice defaults but don't hesitate to tweak to your use case. non_label_column_names = [name for name in raw_datasets["train"].column_names if name != "label"] if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names: sentence1_key, sentence2_key = "sentence1", "sentence2" else: if len(non_label_column_names) >= 2: sentence1_key, sentence2_key = non_label_column_names[:2] else: sentence1_key, sentence2_key = non_label_column_names[0], None # Some models have set the order of the labels to use, so let's make sure we do use it. label_to_id = None if ( model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id and data_args.task_name is not None and not is_regression ): # Some have all caps in their config, some don't. label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()} if sorted(label_name_to_id.keys()) == sorted(label_list): logger.info( f"The configuration of the model provided the following label correspondence: {label_name_to_id}. " "Using it!" ) label_to_id = {i: label_name_to_id[label_list[i]] for i in range(num_labels)} else: logger.warning( "Your model seems to have been trained with labels, but they don't match the dataset: ", f"model labels: {sorted(label_name_to_id.keys())}, dataset labels: {sorted(label_list)}." 
"\nIgnoring the model labels as a result.", ) elif data_args.task_name is None: label_to_id = {v: i for i, v in enumerate(label_list)} def preprocess_function(examples): # Tokenize the texts texts = ( (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key]) ) result = tokenizer(*texts, padding="max_length", max_length=data_args.max_seq_length, truncation=True) if "label" in examples: if label_to_id is not None: # Map labels to IDs (not necessary for GLUE tasks) result["labels"] = [label_to_id[l] for l in examples["label"]] else: # In all cases, rename the column to labels because the model will expect that. result["labels"] = examples["label"] return result processed_datasets = raw_datasets.map( preprocess_function, batched=True, remove_columns=raw_datasets["train"].column_names ) train_dataset = processed_datasets["train"] eval_dataset = processed_datasets["validation_matched" if data_args.task_name == "mnli" else "validation"] # Log a few random samples from the training set: for index in random.sample(range(len(train_dataset)), 3): logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") # Define a summary writer has_tensorboard = is_tensorboard_available() if has_tensorboard and jax.process_index() == 0: try: from flax.metrics.tensorboard import SummaryWriter summary_writer = SummaryWriter(training_args.output_dir) summary_writer.hparams({**training_args.to_dict(), **vars(model_args), **vars(data_args)}) except ImportError as ie: has_tensorboard = False logger.warning( f"Unable to display metrics through TensorBoard because some package are not installed: {ie}" ) else: logger.warning( "Unable to display metrics through TensorBoard because the package is not installed: " "Please run pip install tensorboard to enable." 
) def write_train_metric(summary_writer, train_metrics, train_time, step): summary_writer.scalar("train_time", train_time, step) train_metrics = get_metrics(train_metrics) for key, vals in train_metrics.items(): tag = f"train_{key}" for i, val in enumerate(vals): summary_writer.scalar(tag, val, step - len(vals) + i + 1) def write_eval_metric(summary_writer, eval_metrics, step): for metric_name, value in eval_metrics.items(): summary_writer.scalar(f"eval_{metric_name}", value, step) num_epochs = int(training_args.num_train_epochs) rng = jax.random.PRNGKey(training_args.seed) dropout_rngs = jax.random.split(rng, jax.local_device_count()) train_batch_size = int(training_args.per_device_train_batch_size) * jax.local_device_count() per_device_eval_batch_size = int(training_args.per_device_eval_batch_size) eval_batch_size = per_device_eval_batch_size * jax.device_count() learning_rate_fn = create_learning_rate_fn( len(train_dataset), train_batch_size, training_args.num_train_epochs, training_args.warmup_steps, training_args.learning_rate, ) state = create_train_state( model, learning_rate_fn, is_regression, num_labels=num_labels, weight_decay=training_args.weight_decay ) # define step functions def train_step( state: train_state.TrainState, batch: Dict[str, Array], dropout_rng: PRNGKey ) -> Tuple[train_state.TrainState, float]: """Trains model with an optimizer (both in `state`) on `batch`, returning a pair `(new_state, loss)`.""" dropout_rng, new_dropout_rng = jax.random.split(dropout_rng) targets = batch.pop("labels") def loss_fn(params): logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0] loss = state.loss_fn(logits, targets) return loss grad_fn = jax.value_and_grad(loss_fn) loss, grad = grad_fn(state.params) grad = jax.lax.pmean(grad, "batch") new_state = state.apply_gradients(grads=grad) metrics = jax.lax.pmean({"loss": loss, "learning_rate": learning_rate_fn(state.step)}, axis_name="batch") return new_state, metrics, new_dropout_rng p_train_step = jax.pmap(train_step, axis_name="batch", donate_argnums=(0,)) def eval_step(state, batch): logits = state.apply_fn(**batch, params=state.params, train=False)[0] return state.logits_fn(logits) p_eval_step = jax.pmap(eval_step, axis_name="batch") if data_args.task_name is not None: metric = evaluate.load("glue", data_args.task_name, cache_dir=model_args.cache_dir) else: metric = evaluate.load("accuracy", cache_dir=model_args.cache_dir) logger.info(f"===== Starting training ({num_epochs} epochs) =====") train_time = 0 # make sure weights are replicated on each device state = replicate(state) steps_per_epoch = len(train_dataset) // train_batch_size total_steps = steps_per_epoch * num_epochs epochs = tqdm(range(num_epochs), desc=f"Epoch ... 
(0/{num_epochs})", position=0) for epoch in epochs: train_start = time.time() train_metrics = [] # Create sampling rng rng, input_rng = jax.random.split(rng) # train train_loader = glue_train_data_collator(input_rng, train_dataset, train_batch_size) for step, batch in enumerate( tqdm( train_loader, total=steps_per_epoch, desc="Training...", position=1, ), ): state, train_metric, dropout_rngs = p_train_step(state, batch, dropout_rngs) train_metrics.append(train_metric) cur_step = (epoch * steps_per_epoch) + (step + 1) if cur_step % training_args.logging_steps == 0 and cur_step > 0: # Save metrics train_metric = unreplicate(train_metric) train_time += time.time() - train_start if has_tensorboard and jax.process_index() == 0: write_train_metric(summary_writer, train_metrics, train_time, cur_step) epochs.write( f"Step... ({cur_step}/{total_steps} | Training Loss: {train_metric['loss']}, Learning Rate:" f" {train_metric['learning_rate']})" ) train_metrics = [] if (cur_step % training_args.eval_steps == 0 or cur_step % steps_per_epoch == 0) and cur_step > 0: # evaluate eval_loader = glue_eval_data_collator(eval_dataset, eval_batch_size) for batch in tqdm( eval_loader, total=math.ceil(len(eval_dataset) / eval_batch_size), desc="Evaluating ...", position=2, ): labels = batch.pop("labels") predictions = pad_shard_unpad(p_eval_step)( state, batch, min_device_batch=per_device_eval_batch_size ) metric.add_batch(predictions=np.array(predictions), references=labels) eval_metric = metric.compute() logger.info(f"Step... ({cur_step}/{total_steps} | Eval metrics: {eval_metric})") if has_tensorboard and jax.process_index() == 0: write_eval_metric(summary_writer, eval_metric, cur_step) if (cur_step % training_args.save_steps == 0 and cur_step > 0) or (cur_step == total_steps): # save checkpoint after each epoch and push checkpoint to the hub if jax.process_index() == 0: params = jax.device_get(unreplicate(state.params)) model.save_pretrained(training_args.output_dir, params=params) tokenizer.save_pretrained(training_args.output_dir) if training_args.push_to_hub: api.upload_folder( commit_message=f"Saving weights and logs of epoch {epoch}", folder_path=training_args.output_dir, repo_id=repo_id, repo_type="model", token=training_args.hub_token, ) epochs.desc = f"Epoch ... {epoch + 1}/{num_epochs}" # save the eval metrics in json if jax.process_index() == 0: eval_metric = {f"eval_{metric_name}": value for metric_name, value in eval_metric.items()} path = os.path.join(training_args.output_dir, "eval_results.json") with open(path, "w") as f: json.dump(eval_metric, f, indent=4, sort_keys=True) if __name__ == "__main__": main()
transformers/examples/flax/text-classification/run_flax_glue.py/0
{ "file_path": "transformers/examples/flax/text-classification/run_flax_glue.py", "repo_id": "transformers", "token_count": 12137 }
292
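A minimal sketch of the learning-rate schedule built by create_learning_rate_fn in run_flax_glue.py above, using made-up step counts for demonstration; only optax.linear_schedule and optax.join_schedules are involved, exactly as in the script.

import optax

learning_rate, num_train_steps, num_warmup_steps = 5e-5, 1000, 100
warmup_fn = optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps)
decay_fn = optax.linear_schedule(
    init_value=learning_rate, end_value=0.0, transition_steps=num_train_steps - num_warmup_steps
)
schedule_fn = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[num_warmup_steps])

# the schedule ramps from 0 to the peak learning rate over the warmup steps, then decays linearly to 0
for step in (0, 50, 100, 550, 1000):
    print(step, float(schedule_fn(step)))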
import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors logger = logging.getLogger(__name__) class GLUETransformer(BaseTransformer): mode = "sequence-classification" def __init__(self, hparams): if isinstance(hparams, dict): hparams = Namespace(**hparams) hparams.glue_output_mode = glue_output_modes[hparams.task] num_labels = glue_tasks_num_labels[hparams.task] super().__init__(hparams, num_labels, self.mode) def forward(self, **inputs): return self.model(**inputs) def training_step(self, batch, batch_idx): inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None outputs = self(**inputs) loss = outputs[0] lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"] tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def prepare_data(self): "Called to initialize data. Use the call to construct features" args = self.hparams processor = processors[args.task]() self.labels = processor.get_labels() for mode in ["train", "dev"]: cached_features_file = self._feature_file(mode) if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) else: logger.info("Creating features from dataset file at %s", args.data_dir) examples = ( processor.get_dev_examples(args.data_dir) if mode == "dev" else processor.get_train_examples(args.data_dir) ) features = convert_examples_to_features( examples, self.tokenizer, max_length=args.max_seq_length, label_list=self.labels, output_mode=args.glue_output_mode, ) logger.info("Saving features into cached file %s", cached_features_file) torch.save(features, cached_features_file) def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader: "Load datasets. Called after prepare data." 
# We test on dev set to compare to benchmarks without having to submit to GLUE server mode = "dev" if mode == "test" else mode cached_features_file = self._feature_file(mode) logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long) all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long) if self.hparams.glue_output_mode == "classification": all_labels = torch.tensor([f.label for f in features], dtype=torch.long) elif self.hparams.glue_output_mode == "regression": all_labels = torch.tensor([f.label for f in features], dtype=torch.float) return DataLoader( TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels), batch_size=batch_size, shuffle=shuffle, ) def validation_step(self, batch, batch_idx): inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None outputs = self(**inputs) tmp_eval_loss, logits = outputs[:2] preds = logits.detach().cpu().numpy() out_label_ids = inputs["labels"].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def _eval_end(self, outputs) -> tuple: val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item() preds = np.concatenate([x["pred"] for x in outputs], axis=0) if self.hparams.glue_output_mode == "classification": preds = np.argmax(preds, axis=1) elif self.hparams.glue_output_mode == "regression": preds = np.squeeze(preds) out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0) out_label_list = [[] for _ in range(out_label_ids.shape[0])] preds_list = [[] for _ in range(out_label_ids.shape[0])] results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)} ret = dict(results.items()) ret["log"] = results return ret, preds_list, out_label_list def validation_epoch_end(self, outputs: list) -> dict: ret, preds, targets = self._eval_end(outputs) logs = ret["log"] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def test_epoch_end(self, outputs) -> dict: ret, predictions, targets = self._eval_end(outputs) logs = ret["log"] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def add_model_specific_args(parser, root_dir): BaseTransformer.add_model_specific_args(parser, root_dir) parser.add_argument( "--max_seq_length", default=128, type=int, help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." 
), ) parser.add_argument( "--task", default="", type=str, required=True, help="The GLUE task to run", ) parser.add_argument( "--gpus", default=0, type=int, help="The number of GPUs allocated for this, it is by default 0 meaning none", ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) return parser def main(): parser = argparse.ArgumentParser() add_generic_args(parser, os.getcwd()) parser = GLUETransformer.add_model_specific_args(parser, os.getcwd()) args = parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: args.output_dir = os.path.join( "./results", f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}", ) os.makedirs(args.output_dir) model = GLUETransformer(args) trainer = generic_train(model, args) # Optionally, predict on dev set and write to output_dir if args.do_predict: checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True)) model = model.load_from_checkpoint(checkpoints[-1]) return trainer.test(model) if __name__ == "__main__": main()
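# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original script): one way this Lightning
# GLUE example might be launched from the command line. Only --task, --gpus,
# --max_seq_length and --overwrite_cache are defined in this file; the other
# flags (--model_name_or_path, --data_dir, --output_dir, --do_train,
# --do_predict, ...) are assumed to come from add_generic_args /
# BaseTransformer in lightning_base and should be checked there.
#
#   python run_glue.py \
#     --model_name_or_path bert-base-cased \
#     --task mrpc \
#     --data_dir ./glue_data/MRPC \
#     --output_dir ./results/mrpc \
#     --max_seq_length 128 \
#     --do_train \
#     --do_predict \
#     --gpus 1
# ---------------------------------------------------------------------------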
transformers/examples/legacy/pytorch-lightning/run_glue.py/0
{ "file_path": "transformers/examples/legacy/pytorch-lightning/run_glue.py", "repo_id": "transformers", "token_count": 3476 }
293
#!/usr/bin/env python # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Union import fire import torch from tqdm import tqdm def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None: """Convert a pytorch_model.bin or model.pt file to torch.float16 for faster downloads, less disk space.""" state_dict = torch.load(src_path, map_location=map_location) for k, v in tqdm(state_dict.items()): if not isinstance(v, torch.Tensor): raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin") state_dict[k] = v.half() if save_path is None: # overwrite src_path save_path = src_path torch.save(state_dict, save_path) if __name__ == "__main__": fire.Fire(convert)
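# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original script). Because the module is
# exposed through fire.Fire(convert), the function parameters map directly to
# command-line arguments; the paths below are placeholders:
#
#   python convert_model_to_fp16.py ./model/pytorch_model.bin
#   python convert_model_to_fp16.py ./model/pytorch_model.bin --save_path ./model/pytorch_model_fp16.bin
#
# The same conversion can be done from Python as well:
#
#   from convert_model_to_fp16 import convert
#   convert("./model/pytorch_model.bin", save_path="./model/pytorch_model_fp16.bin")
# ---------------------------------------------------------------------------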
transformers/examples/legacy/seq2seq/convert_model_to_fp16.py/0
{ "file_path": "transformers/examples/legacy/seq2seq/convert_model_to_fp16.py", "repo_id": "transformers", "token_count": 450 }
294
#!/usr/bin/env python # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import shutil import time from json import JSONDecodeError from logging import getLogger from pathlib import Path from typing import Dict, List import torch from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeq2SeqLM, AutoTokenizer from utils import ( Seq2SeqDataset, calculate_bleu, calculate_rouge, chunks, lmap, load_json, parse_numeric_n_bool_cl_kwargs, save_json, use_task_specific_params, write_txt_file, ) logger = getLogger(__name__) def eval_data_dir( data_dir, save_dir: str, model_name: str, bs: int = 8, max_source_length: int = 1024, type_path="val", n_obs=None, fp16=False, task="summarization", local_rank=None, num_return_sequences=1, dataset_kwargs: Dict = None, prefix="", **generate_kwargs, ) -> Dict: """Run evaluation on part of the data for one gpu and save to {save_dir}/rank_{rank}_output.json""" model_name = str(model_name) assert local_rank is not None torch.distributed.init_process_group(backend="nccl", rank=local_rank) save_dir = Path(save_dir) save_path = save_dir.joinpath(f"rank_{local_rank}_output.json") torch.cuda.set_device(local_rank) model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda() if fp16: model = model.half() # determine if we need to increase num_beams use_task_specific_params(model, task) # update config with task specific params num_beams = generate_kwargs.pop("num_beams", model.config.num_beams) # AttributeError risk? if num_return_sequences > num_beams: num_beams = num_return_sequences tokenizer = AutoTokenizer.from_pretrained(model_name) logger.info(f"Inferred tokenizer type: {tokenizer.__class__}") # if this is wrong, check config.model_type. if max_source_length is None: max_source_length = tokenizer.model_max_length if prefix is None: prefix = prefix or getattr(model.config, "prefix", "") or "" ds = Seq2SeqDataset( tokenizer, data_dir, max_source_length, max_target_length=1024, type_path=type_path, n_obs=n_obs, prefix=prefix, **dataset_kwargs, ) # I set shuffle=True for a more accurate progress bar. # If all the longest samples are first, the prog bar estimate is too high at the beginning. 
sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True) data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn) results = [] for batch in tqdm(data_loader): summaries = model.generate( input_ids=batch["input_ids"].to(model.device), attention_mask=batch["attention_mask"].to(model.device), num_return_sequences=num_return_sequences, num_beams=num_beams, **generate_kwargs, ) preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False) ids = batch["ids"] if num_return_sequences > 1: preds = chunks(preds, num_return_sequences) # batch size chunks, each of size num_return_seq for i, pred in enumerate(preds): results.append({"pred": pred, "id": ids[i].item()}) save_json(results, save_path) return results, sampler.num_replicas def run_generate(): parser = argparse.ArgumentParser( epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" ) parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source") parser.add_argument( "--model_name", type=str, help="like facebook/bart-large-cnn,google-t5/t5-base, etc.", default="sshleifer/distilbart-xsum-12-3", ) parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen") parser.add_argument("--max_source_length", type=int, default=None) parser.add_argument( "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test" ) parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics") parser.add_argument("--bs", type=int, default=8, required=False, help="batch size") parser.add_argument( "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch" ) parser.add_argument( "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all." ) parser.add_argument( "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return" ) parser.add_argument( "--sync_timeout", type=int, default=600, required=False, help="How long should master process wait for other processes to finish.", ) parser.add_argument("--src_lang", type=str, default=None, required=False) parser.add_argument("--tgt_lang", type=str, default=None, required=False) parser.add_argument( "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples" ) parser.add_argument("--fp16", action="store_true") parser.add_argument("--debug", action="store_true") start_time = time.time() args, rest = parser.parse_known_args() generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest) if generate_kwargs and args.local_rank <= 0: print(f"parsed the following generate kwargs: {generate_kwargs}") json_save_dir = Path(args.save_dir + "_tmp") Path(json_save_dir).mkdir(exist_ok=True) # this handles locking. intermediate_files = list(json_save_dir.glob("rank_*.json")) if intermediate_files: raise ValueError(f"Found files at {json_save_dir} please move or remove them.") # In theory, a node could finish and save before another node hits this. If this happens, we can address later. 
dataset_kwargs = {} if args.src_lang is not None: dataset_kwargs["src_lang"] = args.src_lang if args.tgt_lang is not None: dataset_kwargs["tgt_lang"] = args.tgt_lang Path(args.save_dir).mkdir(exist_ok=True) results, num_replicas = eval_data_dir( args.data_dir, json_save_dir, args.model_name, type_path=args.type_path, bs=args.bs, fp16=args.fp16, task=args.task, local_rank=args.local_rank, n_obs=args.n_obs, max_source_length=args.max_source_length, num_return_sequences=args.num_return_sequences, prefix=args.prefix, dataset_kwargs=dataset_kwargs, **generate_kwargs, ) if args.local_rank <= 0: save_dir = Path(args.save_dir) save_dir.mkdir(exist_ok=True) partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout) preds = combine_partial_results(partial_results) if args.num_return_sequences > 1: save_path = save_dir.joinpath("pseudolabel_results.json") print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/") save_json(preds, save_path) return tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target") with open(tgt_file) as f: labels = [x.rstrip() for x in f.readlines()][: len(preds)] # Calculate metrics, save metrics, and save _generations.txt calc_bleu = "translation" in args.task score_fn = calculate_bleu if calc_bleu else calculate_rouge metric_name = "bleu" if calc_bleu else "rouge" metrics: Dict = score_fn(preds, labels) metrics["n_obs"] = len(preds) runtime = time.time() - start_time metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4) metrics["n_gpus"] = num_replicas # TODO(@stas00): add whatever metadata to metrics metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json") save_json(metrics, metrics_save_path, indent=None) print(metrics) write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt")) if args.debug: write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target")) else: shutil.rmtree(json_save_dir) def combine_partial_results(partial_results) -> List: """Concatenate partial results into one file, then sort it by id.""" records = [] for partial_result in partial_results: records.extend(partial_result) records = sorted(records, key=lambda x: x["id"]) preds = [x["pred"] for x in records] return preds def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]: # WAIT FOR lots of .json files start_wait = time.time() logger.info("waiting for all nodes to finish") json_data = None while (time.time() - start_wait) < timeout: json_files = list(save_dir.glob("rank_*.json")) if len(json_files) < num_replicas: continue try: # make sure all json files are fully saved json_data = lmap(load_json, json_files) return json_data except JSONDecodeError: continue else: raise TimeoutError("Rank 0 gave up on waiting for other processes") # Unreachable if __name__ == "__main__": # Usage for MT: run_generate()
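# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original script). The "# Usage for MT:"
# comment above appears truncated in this copy, so the command below is only an
# illustration for the summarization case; the model name and paths are
# placeholders. The script expects one process per GPU so that --local_rank is
# populated, e.g. via torch.distributed.launch:
#
#   python -m torch.distributed.launch --nproc_per_node=8 run_distributed_eval.py \
#     --model_name sshleifer/distilbart-xsum-12-3 \
#     --data_dir xsum \
#     --save_dir xsum_generations \
#     --bs 16 \
#     --fp16
#
# Unrecognized flags such as --num_beams=6 are forwarded to model.generate(),
# as stated in the argument parser epilog.
# ---------------------------------------------------------------------------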
transformers/examples/legacy/seq2seq/run_distributed_eval.py/0
{ "file_path": "transformers/examples/legacy/seq2seq/run_distributed_eval.py", "repo_id": "transformers", "token_count": 4164 }
295
<!--- Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Examples This folder contains actively maintained examples of use of 🤗 Transformers using the PyTorch backend, organized by ML task. ## The Big Table of Tasks Here is the list of all our examples: - with information on whether they are **built on top of `Trainer`** (if not, they still work, they might just lack some features), - whether or not they have a version using the [🤗 Accelerate](https://github.com/huggingface/accelerate) library. - whether or not they leverage the [🤗 Datasets](https://github.com/huggingface/datasets) library. - links to **Colab notebooks** to walk through the scripts and run them easily, <!-- Coming soon! - links to **Cloud deployments** to be able to deploy large-scale trainings in the Cloud with little to no setup. --> | Task | Example datasets | Trainer support | 🤗 Accelerate | 🤗 Datasets | Colab |---|---|:---:|:---:|:---:|:---:| | [**`language-modeling`**](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling) | [WikiText-2](https://huggingface.co/datasets/wikitext) | ✅ | ✅ | ✅ | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb) | [**`multiple-choice`**](https://github.com/huggingface/transformers/tree/main/examples/pytorch/multiple-choice) | [SWAG](https://huggingface.co/datasets/swag) | ✅ | ✅ | ✅ | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multiple_choice.ipynb) | [**`question-answering`**](https://github.com/huggingface/transformers/tree/main/examples/pytorch/question-answering) | [SQuAD](https://huggingface.co/datasets/squad) | ✅ | ✅ | ✅ | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering.ipynb) | [**`summarization`**](https://github.com/huggingface/transformers/tree/main/examples/pytorch/summarization) | [XSum](https://huggingface.co/datasets/xsum) | ✅ | ✅ | ✅ | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/summarization.ipynb) | [**`text-classification`**](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-classification) | [GLUE](https://huggingface.co/datasets/glue) | ✅ | ✅ | ✅ | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification.ipynb) | [**`text-generation`**](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-generation) | - | n/a | - | - | [![Open In 
Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/blog/blob/main/notebooks/02_how_to_generate.ipynb) | [**`token-classification`**](https://github.com/huggingface/transformers/tree/main/examples/pytorch/token-classification) | [CoNLL NER](https://huggingface.co/datasets/conll2003) | ✅ |✅ | ✅ | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification.ipynb) | [**`translation`**](https://github.com/huggingface/transformers/tree/main/examples/pytorch/translation) | [WMT](https://huggingface.co/datasets/wmt17) | ✅ | ✅ |✅ | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/translation.ipynb) | [**`speech-recognition`**](https://github.com/huggingface/transformers/tree/main/examples/pytorch/speech-recognition) | [TIMIT](https://huggingface.co/datasets/timit_asr) | ✅ | - |✅ | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/speech_recognition.ipynb) | [**`multi-lingual speech-recognition`**](https://github.com/huggingface/transformers/tree/main/examples/pytorch/speech-recognition) | [Common Voice](https://huggingface.co/datasets/common_voice) | ✅ | - |✅ | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multi_lingual_speech_recognition.ipynb) | [**`audio-classification`**](https://github.com/huggingface/transformers/tree/main/examples/pytorch/audio-classification) | [SUPERB KS](https://huggingface.co/datasets/superb) | ✅ | - |✅ | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/audio_classification.ipynb) | [**`image-pretraining`**](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining) | [ImageNet-1k](https://huggingface.co/datasets/imagenet-1k) | ✅ | - |✅ | / | [**`image-classification`**](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) | [CIFAR-10](https://huggingface.co/datasets/cifar10) | ✅ | ✅ |✅ | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb) | [**`semantic-segmentation`**](https://github.com/huggingface/transformers/tree/main/examples/pytorch/semantic-segmentation) | [SCENE_PARSE_150](https://huggingface.co/datasets/scene_parse_150) | ✅ | ✅ |✅ | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/semantic_segmentation.ipynb) ## Running quick tests Most examples are equipped with a mechanism to truncate the number of dataset samples to the desired length. This is useful for debugging purposes, for example to quickly check that all stages of the programs can complete, before running the same setup on the full dataset which may take hours to complete. 
For example here is how to truncate all three splits to just 50 samples each: ```bash examples/pytorch/token-classification/run_ner.py \ --max_train_samples 50 \ --max_eval_samples 50 \ --max_predict_samples 50 \ [...] ``` Most example scripts should have the first two command line arguments and some have the third one. You can quickly check if a given example supports any of these by passing a `-h` option, e.g.: ```bash examples/pytorch/token-classification/run_ner.py -h ``` ## Resuming training You can resume training from a previous checkpoint like this: 1. Pass `--output_dir previous_output_dir` without `--overwrite_output_dir` to resume training from the latest checkpoint in `output_dir` (what you would use if the training was interrupted, for instance). 2. Pass `--resume_from_checkpoint path_to_a_specific_checkpoint` to resume training from that checkpoint folder. Should you want to turn an example into a notebook where you'd no longer have access to the command line, 🤗 Trainer supports resuming from a checkpoint via `trainer.train(resume_from_checkpoint)`. 1. If `resume_from_checkpoint` is `True` it will look for the last checkpoint in the value of `output_dir` passed via `TrainingArguments`. 2. If `resume_from_checkpoint` is a path to a specific checkpoint it will use that saved checkpoint folder to resume the training from. ### Upload the trained/fine-tuned model to the Hub All the example scripts support automatic upload of your final model to the [Model Hub](https://huggingface.co/models) by adding a `--push_to_hub` argument. It will then create a repository with your username slash the name of the folder you are using as `output_dir`. For instance, `"sgugger/test-mrpc"` if your username is `sgugger` and you are working in the folder `~/tmp/test-mrpc`. To specify a given repository name, use the `--hub_model_id` argument. You will need to specify the whole repository name (including your username), for instance `--hub_model_id sgugger/finetuned-bert-mrpc`. To upload to an organization you are a member of, just use the name of that organization instead of your username: `--hub_model_id huggingface/finetuned-bert-mrpc`. A few notes on this integration: - you will need to be logged in to the Hugging Face website locally for it to work, the easiest way to achieve this is to run `huggingface-cli login` and then type your username and password when prompted. You can also pass along your authentication token with the `--hub_token` argument. - the `output_dir` you pick will either need to be a new folder or a local clone of the distant repository you are using. ## Distributed training and mixed precision All the PyTorch scripts mentioned above work out of the box with distributed training and mixed precision, thanks to the [Trainer API](https://huggingface.co/transformers/main_classes/trainer.html). 
To launch one of them on _n_ GPUs, use the following command: ```bash torchrun \ --nproc_per_node number_of_gpu_you_have path_to_script.py \ --all_arguments_of_the_script ``` As an example, here is how you would fine-tune the BERT large model (with whole word masking) on the text classification MNLI task using the `run_glue` script, with 8 GPUs: ```bash torchrun \ --nproc_per_node 8 pytorch/text-classification/run_glue.py \ --model_name_or_path google-bert/bert-large-uncased-whole-word-masking \ --task_name mnli \ --do_train \ --do_eval \ --max_seq_length 128 \ --per_device_train_batch_size 8 \ --learning_rate 2e-5 \ --num_train_epochs 3.0 \ --output_dir /tmp/mnli_output/ ``` If you have a GPU with mixed precision capabilities (architecture Pascal or more recent), you can use mixed precision training with PyTorch 1.6.0 or latest, or by installing the [Apex](https://github.com/NVIDIA/apex) library for previous versions. Just add the flag `--fp16` to your command launching one of the scripts mentioned above! Using mixed precision training usually results in 2x-speedup for training with the same final results (as shown in [this table](https://github.com/huggingface/transformers/tree/main/examples/text-classification#mixed-precision-training) for text classification). ## Running on TPUs When using Tensorflow, TPUs are supported out of the box as a `tf.distribute.Strategy`. When using PyTorch, we support TPUs thanks to `pytorch/xla`. For more context and information on how to setup your TPU environment refer to Google's documentation and to the very detailed [pytorch/xla README](https://github.com/pytorch/xla/blob/master/README.md). In this repo, we provide a very simple launcher script named [xla_spawn.py](https://github.com/huggingface/transformers/tree/main/examples/pytorch/xla_spawn.py) that lets you run our example scripts on multiple TPU cores without any boilerplate. Just pass a `--num_cores` flag to this script, then your regular training script with its arguments (this is similar to the `torch.distributed.launch` helper for `torch.distributed`): ```bash python xla_spawn.py --num_cores num_tpu_you_have \ path_to_script.py \ --all_arguments_of_the_script ``` As an example, here is how you would fine-tune the BERT large model (with whole word masking) on the text classification MNLI task using the `run_glue` script, with 8 TPUs (from this folder): ```bash python xla_spawn.py --num_cores 8 \ text-classification/run_glue.py \ --model_name_or_path google-bert/bert-large-uncased-whole-word-masking \ --task_name mnli \ --do_train \ --do_eval \ --max_seq_length 128 \ --per_device_train_batch_size 8 \ --learning_rate 2e-5 \ --num_train_epochs 3.0 \ --output_dir /tmp/mnli_output/ ``` ## Using Accelerate Most PyTorch example scripts have a version using the [🤗 Accelerate](https://github.com/huggingface/accelerate) library that exposes the training loop so it's easy for you to customize or tweak them to your needs. They all require you to install `accelerate` with the latest development version ```bash pip install git+https://github.com/huggingface/accelerate ``` Then you can easily launch any of the scripts by running ```bash accelerate config ``` and reply to the questions asked. Then ```bash accelerate test ``` that will check everything is ready for training. Finally, you can launch training with ```bash accelerate launch path_to_script.py --args_to_script ``` ## Logging & Experiment tracking You can easily log and monitor your runs code. 
The following are currently supported:

* [TensorBoard](https://www.tensorflow.org/tensorboard)
* [Weights & Biases](https://docs.wandb.ai/integrations/huggingface)
* [Comet ML](https://www.comet.ml/docs/python-sdk/huggingface/)
* [Neptune](https://docs.neptune.ai/integrations-and-supported-tools/model-training/hugging-face)
* [ClearML](https://clear.ml/docs/latest/docs/getting_started/ds/ds_first_steps)
* [DVCLive](https://dvc.org/doc/dvclive/ml-frameworks/huggingface)

### Weights & Biases

To use Weights & Biases, install the wandb package with:

```bash
pip install wandb
```

Then log in from the command line:

```bash
wandb login
```

If you are in Jupyter or Colab, you should log in with:

```python
import wandb
wandb.login()
```

To enable logging to W&B, include `"wandb"` in the `report_to` of your `TrainingArguments` or script. Or just pass along `--report_to all` if you have `wandb` installed.

Whenever you use the `Trainer` class, your losses, evaluation metrics, model topology and gradients will automatically be logged.

Advanced configuration is possible by setting environment variables:

| Environment Variable | Value |
|---|---|
| WANDB_LOG_MODEL | Log the model as an artifact at the end of training (`false` by default) |
| WANDB_WATCH | one of `gradients` (default) to log histograms of gradients, `all` to log histograms of both gradients and parameters, or `false` for no histogram logging |
| WANDB_PROJECT | Organize runs by project |

Set run names with the `run_name` argument, present in scripts or as part of `TrainingArguments`.

Additional configuration options are available through generic [wandb environment variables](https://docs.wandb.com/library/environment-variables).

Refer to related [documentation & examples](https://docs.wandb.ai/integrations/huggingface).

### Comet.ml

To use `comet_ml`, install the Python package with:

```bash
pip install comet_ml
```

or if in a Conda environment:

```bash
conda install -c comet_ml -c anaconda -c conda-forge comet_ml
```

### Neptune

First, install the Neptune client library. You can do it with either `pip` or `conda`:

`pip`:

```bash
pip install neptune
```

`conda`:

```bash
conda install -c conda-forge neptune
```

Next, in your model training script, import `NeptuneCallback`:

```python
from transformers.integrations import NeptuneCallback
```

To enable Neptune logging, in your `TrainingArguments`, set the `report_to` argument to `"neptune"`:

```python
training_args = TrainingArguments(
    "quick-training-distilbert-mrpc",
    evaluation_strategy="steps",
    eval_steps=20,
    report_to="neptune",
)

trainer = Trainer(
    model,
    training_args,
    ...
)
```

**Note:** This method requires saving your Neptune credentials as environment variables (see the bottom of the section).

Alternatively, for more logging options, create a Neptune callback:

```python
neptune_callback = NeptuneCallback()
```

To add more detail to the tracked run, you can supply optional arguments to `NeptuneCallback`.
Some examples: ```python neptune_callback = NeptuneCallback( name = "DistilBERT", description = "DistilBERT fine-tuned on GLUE/MRPC", tags = ["args-callback", "fine-tune", "MRPC"], # tags help you manage runs in Neptune base_namespace="callback", # the default is "finetuning" log_checkpoints = "best", # other options are "last", "same", and None capture_hardware_metrics = False, # additional keyword arguments for a Neptune run ) ``` Pass the callback to the Trainer: ```python training_args = TrainingArguments(..., report_to=None) trainer = Trainer( model, training_args, ... callbacks=[neptune_callback], ) ``` Now, when you start the training with `trainer.train()`, your metadata will be logged in Neptune. **Note:** Although you can pass your **Neptune API token** and **project name** as arguments when creating the callback, the recommended way is to save them as environment variables: | Environment variable | Value | | :------------------- | :--------------------------------------------------- | | `NEPTUNE_API_TOKEN` | Your Neptune API token. To find and copy it, click your Neptune avatar and select **Get your API token**. | | `NEPTUNE_PROJECT` | The full name of your Neptune project (`workspace-name/project-name`). To find and copy it, head to **project settings** &rarr; **Properties**. | For detailed instructions and examples, see the [Neptune docs](https://docs.neptune.ai/integrations/transformers/). ### ClearML To use ClearML, install the clearml package with: ```bash pip install clearml ``` Then [create new credentials]() from the ClearML Server. You can get a free hosted server [here]() or [self-host your own]()! After creating your new credentials, you can either copy the local snippet which you can paste after running: ```bash clearml-init ``` Or you can copy the jupyter snippet if you are in Jupyter or Colab: ```python %env CLEARML_WEB_HOST=https://app.clear.ml %env CLEARML_API_HOST=https://api.clear.ml %env CLEARML_FILES_HOST=https://files.clear.ml %env CLEARML_API_ACCESS_KEY=*** %env CLEARML_API_SECRET_KEY=*** ``` To enable logging to ClearML, include `"clearml"` in the `report_to` of your `TrainingArguments` or script. Or just pass along `--report_to all` if you have `clearml` already installed. Advanced configuration is possible by setting environment variables: | Environment Variable | Value | |---|---| | CLEARML_PROJECT | Name of the project in ClearML. (default: `"HuggingFace Transformers"`) | | CLEARML_TASK | Name of the task in ClearML. (default: `"Trainer"`) | Additional configuration options are available through generic [clearml environment variables](https://clear.ml/docs/latest/docs/configs/env_vars).
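As a quick recap of the `report_to` mechanism used by the integrations above, here is a minimal sketch (the output directory, run name and backend list are made-up placeholders, not values required by any of the tools):

```python
from transformers import TrainingArguments

# Any subset of the integrations listed in this section can be combined;
# "all" enables every installed backend, "none" disables reporting.
training_args = TrainingArguments(
    output_dir="./results/logging-demo",  # placeholder output directory
    report_to=["tensorboard", "wandb"],   # requires the corresponding packages to be installed
    run_name="logging-demo-run",          # used as the run/experiment name by several backends
    logging_steps=50,                     # log training metrics every 50 steps
)
```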
transformers/examples/pytorch/README.md/0
{ "file_path": "transformers/examples/pytorch/README.md", "repo_id": "transformers", "token_count": 6250 }
296
#!/usr/bin/env python # coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and import logging import os import sys import warnings from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForMaskedImageModeling, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version """ Pre-training a 🤗 Transformers model for simple masked image modeling (SimMIM). Any model supported by the AutoModelForMaskedImageModeling API can be used. """ logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.40.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt") MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command line. """ dataset_name: Optional[str] = field( default="cifar10", metadata={"help": "Name of a dataset from the datasets package"} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) image_column_name: Optional[str] = field( default=None, metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."}, ) train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."}) validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."}) train_val_split: Optional[float] = field( default=0.15, metadata={"help": "Percent to split off of train for validation."} ) mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."}) mask_ratio: float = field( default=0.6, metadata={"help": "Percentage of patches to mask."}, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." 
) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) def __post_init__(self): data_files = {} if self.train_dir is not None: data_files["train"] = self.train_dir if self.validation_dir is not None: data_files["val"] = self.validation_dir self.data_files = data_files if data_files else None @dataclass class ModelArguments: """ Arguments pertaining to which model/config/image processor we are going to pre-train. """ model_name_or_path: str = field( default=None, metadata={ "help": ( "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a " "checkpoint identifier on the hub. " "Don't set if you want to train a model from scratch." ) }, ) model_type: Optional[str] = field( default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, ) config_name_or_path: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) config_overrides: Optional[str] = field( default=None, metadata={ "help": ( "Override some existing default config settings when a model is trained from scratch. Example: " "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" ) }, ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."}) token: str = field( default=None, metadata={ "help": ( "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token " "generated when running `huggingface-cli login` (stored in `~/.huggingface`)." ) }, ) use_auth_token: bool = field( default=None, metadata={ "help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token` instead." }, ) trust_remote_code: bool = field( default=False, metadata={ "help": ( "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option " "should only be set to `True` for repositories you trust and in which you have read the code, as it will " "execute code present on the Hub on your local machine." ) }, ) image_size: Optional[int] = field( default=None, metadata={ "help": ( "The size (resolution) of each image. If not specified, will use `image_size` of the configuration." ) }, ) patch_size: Optional[int] = field( default=None, metadata={ "help": ( "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration." ) }, ) encoder_stride: Optional[int] = field( default=None, metadata={"help": "Stride to use for the encoder."}, ) class MaskGenerator: """ A class to generate boolean masks for the pretraining task. A mask is a 1D tensor of shape (model_patch_size**2,) where the value is either 0 or 1, where 1 indicates "masked". 
""" def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6): self.input_size = input_size self.mask_patch_size = mask_patch_size self.model_patch_size = model_patch_size self.mask_ratio = mask_ratio if self.input_size % self.mask_patch_size != 0: raise ValueError("Input size must be divisible by mask patch size") if self.mask_patch_size % self.model_patch_size != 0: raise ValueError("Mask patch size must be divisible by model patch size") self.rand_size = self.input_size // self.mask_patch_size self.scale = self.mask_patch_size // self.model_patch_size self.token_count = self.rand_size**2 self.mask_count = int(np.ceil(self.token_count * self.mask_ratio)) def __call__(self): mask_idx = np.random.permutation(self.token_count)[: self.mask_count] mask = np.zeros(self.token_count, dtype=int) mask[mask_idx] = 1 mask = mask.reshape((self.rand_size, self.rand_size)) mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1) return torch.tensor(mask.flatten()) def collate_fn(examples): pixel_values = torch.stack([example["pixel_values"] for example in examples]) mask = torch.stack([example["mask"] for example in examples]) return {"pixel_values": pixel_values, "bool_masked_pos": mask} def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() if model_args.use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token` instead.", FutureWarning, ) if model_args.token is not None: raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") model_args.token = model_args.use_auth_token # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_mim", model_args, data_args) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() log_level = training_args.get_process_log_level() logger.setLevel(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") # Detecting last checkpoint. 
last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Initialize our dataset. ds = load_dataset( data_args.dataset_name, data_args.dataset_config_name, data_files=data_args.data_files, cache_dir=model_args.cache_dir, token=model_args.token, ) # If we don't have a validation split, split off a percentage of train as validation. data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0: split = ds["train"].train_test_split(data_args.train_val_split) ds["train"] = split["train"] ds["validation"] = split["test"] # Create config # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config_kwargs = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "token": model_args.token, "trust_remote_code": model_args.trust_remote_code, } if model_args.config_name_or_path: config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs) elif model_args.model_name_or_path: config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs) else: config = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") if model_args.config_overrides is not None: logger.info(f"Overriding config: {model_args.config_overrides}") config.update_from_string(model_args.config_overrides) logger.info(f"New config: {config}") # make sure the decoder_type is "simmim" (only relevant for BEiT) if hasattr(config, "decoder_type"): config.decoder_type = "simmim" # adapt config model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size model_args.encoder_stride = ( model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride ) config.update( { "image_size": model_args.image_size, "patch_size": model_args.patch_size, "encoder_stride": model_args.encoder_stride, } ) # create image processor if model_args.image_processor_name: image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs) elif model_args.model_name_or_path: image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs) else: IMAGE_PROCESSOR_TYPES = { conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items() } image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]() # create model if model_args.model_name_or_path: model = AutoModelForMaskedImageModeling.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, 
revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) else: logger.info("Training new model from scratch") model = AutoModelForMaskedImageModeling.from_config(config, trust_remote_code=model_args.trust_remote_code) if training_args.do_train: column_names = ds["train"].column_names else: column_names = ds["validation"].column_names if data_args.image_column_name is not None: image_column_name = data_args.image_column_name elif "image" in column_names: image_column_name = "image" elif "img" in column_names: image_column_name = "img" else: image_column_name = column_names[0] # transformations as done in original SimMIM paper # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py transforms = Compose( [ Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img), RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean, std=image_processor.image_std), ] ) # create mask generator mask_generator = MaskGenerator( input_size=model_args.image_size, mask_patch_size=data_args.mask_patch_size, model_patch_size=model_args.patch_size, mask_ratio=data_args.mask_ratio, ) def preprocess_images(examples): """Preprocess a batch of images by applying transforms + creating a corresponding mask, indicating which patches to mask.""" examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]] examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))] return examples if training_args.do_train: if "train" not in ds: raise ValueError("--do_train requires a train dataset") if data_args.max_train_samples is not None: ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples)) # Set the training transforms ds["train"].set_transform(preprocess_images) if training_args.do_eval: if "validation" not in ds: raise ValueError("--do_eval requires a validation dataset") if data_args.max_eval_samples is not None: ds["validation"] = ( ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples)) ) # Set the validation transforms ds["validation"].set_transform(preprocess_images) # Initialize our trainer trainer = Trainer( model=model, args=training_args, train_dataset=ds["train"] if training_args.do_train else None, eval_dataset=ds["validation"] if training_args.do_eval else None, tokenizer=image_processor, data_collator=collate_fn, ) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() trainer.log_metrics("train", train_result.metrics) trainer.save_metrics("train", train_result.metrics) trainer.save_state() # Evaluation if training_args.do_eval: metrics = trainer.evaluate() trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) # Write model card and (optionally) push to hub kwargs = { "finetuned_from": model_args.model_name_or_path, "tasks": "masked-image-modeling", "dataset": data_args.dataset_name, "tags": ["masked-image-modeling"], } if training_args.push_to_hub: trainer.push_to_hub(**kwargs) else: trainer.create_model_card(**kwargs) if __name__ == "__main__": main()
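# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original script): a small SimMIM
# pre-training run on the default cifar10 dataset. The backbone and
# hyper-parameters are illustrative only; any architecture exposed through
# AutoModelForMaskedImageModeling (e.g. ViT or Swin) should be usable here.
#
#   python run_mim.py \
#     --model_type vit \
#     --dataset_name cifar10 \
#     --output_dir ./outputs/simmim-demo \
#     --do_train \
#     --do_eval \
#     --per_device_train_batch_size 8 \
#     --num_train_epochs 1 \
#     --max_train_samples 1000 \
#     --max_eval_samples 200
# ---------------------------------------------------------------------------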
transformers/examples/pytorch/image-pretraining/run_mim.py/0
{ "file_path": "transformers/examples/pytorch/image-pretraining/run_mim.py", "repo_id": "transformers", "token_count": 8028 }
297
# coding=utf-8 # Copyright 2018 HuggingFace Inc.. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import logging import os import sys from time import time from unittest.mock import patch from transformers.testing_utils import TestCasePlus, require_torch_xla logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() def get_results(output_dir): results = {} path = os.path.join(output_dir, "all_results.json") if os.path.exists(path): with open(path, "r") as f: results = json.load(f) else: raise ValueError(f"can't find {path}") return results stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) @require_torch_xla class TorchXLAExamplesTests(TestCasePlus): def test_run_glue(self): import xla_spawn tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" ./examples/pytorch/text-classification/run_glue.py --num_cores=8 ./examples/pytorch/text-classification/run_glue.py --model_name_or_path distilbert/distilbert-base-uncased --output_dir {tmp_dir} --overwrite_output_dir --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --do_train --do_eval --debug tpu_metrics_debug --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --max_steps=10 --warmup_steps=2 --seed=42 --max_seq_length=128 """.split() with patch.object(sys, "argv", testargs): start = time() xla_spawn.main() end = time() result = get_results(tmp_dir) self.assertGreaterEqual(result["eval_accuracy"], 0.75) # Assert that the script takes less than 500 seconds to make sure it doesn't hang. self.assertLess(end - start, 500) def test_trainer_tpu(self): import xla_spawn testargs = """ ./tests/test_trainer_tpu.py --num_cores=8 ./tests/test_trainer_tpu.py """.split() with patch.object(sys, "argv", testargs): xla_spawn.main()
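# ---------------------------------------------------------------------------
# Hedged usage note (not part of the original test file): these tests are gated
# by @require_torch_xla, so they only run on a host where torch_xla is
# installed. A plain pytest invocation such as the following should work:
#
#   python -m pytest examples/pytorch/old_test_xla_examples.py -rA
# ---------------------------------------------------------------------------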
transformers/examples/pytorch/old_test_xla_examples.py/0
{ "file_path": "transformers/examples/pytorch/old_test_xla_examples.py", "repo_id": "transformers", "token_count": 1249 }
298
#!/usr/bin/env python # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Finetuning the library models for sequence classification on GLUE.""" # You can also adapt this script on your own text classification task. Pointers for this are left as comments. import logging import os import random import sys import warnings from dataclasses import dataclass, field from typing import Optional import datasets import evaluate import numpy as np from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, PretrainedConfig, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.40.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") task_to_keys = { "cola": ("sentence", None), "mnli": ("premise", "hypothesis"), "mrpc": ("sentence1", "sentence2"), "qnli": ("question", "sentence"), "qqp": ("question1", "question2"), "rte": ("sentence1", "sentence2"), "sst2": ("sentence", None), "stsb": ("sentence1", "sentence2"), "wnli": ("sentence1", "sentence2"), } logger = logging.getLogger(__name__) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command line. """ task_name: Optional[str] = field( default=None, metadata={"help": "The name of the task to train on: " + ", ".join(task_to_keys.keys())}, ) dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) max_seq_length: int = field( default=128, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} ) pad_to_max_length: bool = field( default=True, metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) }, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." 
) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) max_predict_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." ) }, ) train_file: Optional[str] = field( default=None, metadata={"help": "A csv or a json file containing the training data."} ) validation_file: Optional[str] = field( default=None, metadata={"help": "A csv or a json file containing the validation data."} ) test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."}) def __post_init__(self): if self.task_name is not None: self.task_name = self.task_name.lower() if self.task_name not in task_to_keys.keys(): raise ValueError("Unknown task, you should pick one in " + ",".join(task_to_keys.keys())) elif self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.") else: train_extension = self.train_file.split(".")[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." validation_extension = self.validation_file.split(".")[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) token: str = field( default=None, metadata={ "help": ( "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token " "generated when running `huggingface-cli login` (stored in `~/.huggingface`)." ) }, ) use_auth_token: bool = field( default=None, metadata={ "help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token` instead." }, ) trust_remote_code: bool = field( default=False, metadata={ "help": ( "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option " "should only be set to `True` for repositories you trust and in which you have read the code, as it will " "execute code present on the Hub on your local machine." 
) }, ) ignore_mismatched_sizes: bool = field( default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."}, ) def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() if model_args.use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token` instead.", FutureWarning, ) if model_args.token is not None: raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") model_args.token = model_args.use_auth_token # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_glue", model_args, data_args) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). 
# # For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the # sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named # label if at least two columns are provided. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.task_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset( "nyu-mll/glue", data_args.task_name, cache_dir=model_args.cache_dir, token=model_args.token, ) elif data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset( data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, token=model_args.token, ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. data_files = {"train": data_args.train_file, "validation": data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: train_extension = data_args.train_file.split(".")[-1] test_extension = data_args.test_file.split(".")[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." data_files["test"] = data_args.test_file else: raise ValueError("Need either a GLUE task or a test file for `do_predict`.") for key in data_files.keys(): logger.info(f"load a local file for {key}: {data_files[key]}") if data_args.train_file.endswith(".csv"): # Loading a dataset from local csv files raw_datasets = load_dataset( "csv", data_files=data_files, cache_dir=model_args.cache_dir, token=model_args.token, ) else: # Loading a dataset from local json files raw_datasets = load_dataset( "json", data_files=data_files, cache_dir=model_args.cache_dir, token=model_args.token, ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets. # Labels if data_args.task_name is not None: is_regression = data_args.task_name == "stsb" if not is_regression: label_list = raw_datasets["train"].features["label"].names num_labels = len(label_list) else: num_labels = 1 else: # Trying to have good defaults here, don't hesitate to tweak to your needs. is_regression = raw_datasets["train"].features["label"].dtype in ["float32", "float64"] if is_regression: num_labels = 1 else: # A useful fast method: # https://huggingface.co/docs/datasets/package_reference/main_classes#datasets.Dataset.unique label_list = raw_datasets["train"].unique("label") label_list.sort() # Let's sort it for determinism num_labels = len(label_list) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) model = AutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, ) # Preprocessing the raw_datasets if data_args.task_name is not None: sentence1_key, sentence2_key = task_to_keys[data_args.task_name] else: # Again, we try to have some nice defaults but don't hesitate to tweak to your use case. non_label_column_names = [name for name in raw_datasets["train"].column_names if name != "label"] if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names: sentence1_key, sentence2_key = "sentence1", "sentence2" else: if len(non_label_column_names) >= 2: sentence1_key, sentence2_key = non_label_column_names[:2] else: sentence1_key, sentence2_key = non_label_column_names[0], None # Padding strategy if data_args.pad_to_max_length: padding = "max_length" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch padding = False # Some models have set the order of the labels to use, so let's make sure we do use it. label_to_id = None if ( model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id and data_args.task_name is not None and not is_regression ): # Some have all caps in their config, some don't. label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()} if sorted(label_name_to_id.keys()) == sorted(label_list): label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)} else: logger.warning( "Your model seems to have been trained with labels, but they don't match the dataset: ", f"model labels: {sorted(label_name_to_id.keys())}, dataset labels: {sorted(label_list)}." "\nIgnoring the model labels as a result.", ) elif data_args.task_name is None and not is_regression: label_to_id = {v: i for i, v in enumerate(label_list)} if label_to_id is not None: model.config.label2id = label_to_id model.config.id2label = {id: label for label, id in config.label2id.items()} elif data_args.task_name is not None and not is_regression: model.config.label2id = {l: i for i, l in enumerate(label_list)} model.config.id2label = {id: label for label, id in config.label2id.items()} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the " f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." 
) max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) def preprocess_function(examples): # Tokenize the texts args = ( (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key]) ) result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True) # Map labels to IDs (not necessary for GLUE tasks) if label_to_id is not None and "label" in examples: result["label"] = [(label_to_id[l] if l != -1 else -1) for l in examples["label"]] return result with training_args.main_process_first(desc="dataset map pre-processing"): raw_datasets = raw_datasets.map( preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on dataset", ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset") train_dataset = raw_datasets["train"] if data_args.max_train_samples is not None: max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset") eval_dataset = raw_datasets["validation_matched" if data_args.task_name == "mnli" else "validation"] if data_args.max_eval_samples is not None: max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) if training_args.do_predict or data_args.task_name is not None or data_args.test_file is not None: if "test" not in raw_datasets and "test_matched" not in raw_datasets: raise ValueError("--do_predict requires a test dataset") predict_dataset = raw_datasets["test_matched" if data_args.task_name == "mnli" else "test"] if data_args.max_predict_samples is not None: max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples) predict_dataset = predict_dataset.select(range(max_predict_samples)) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(train_dataset)), 3): logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") # Get the metric function if data_args.task_name is not None: metric = evaluate.load("glue", data_args.task_name, cache_dir=model_args.cache_dir) elif is_regression: metric = evaluate.load("mse", cache_dir=model_args.cache_dir) else: metric = evaluate.load("accuracy", cache_dir=model_args.cache_dir) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(p: EvalPrediction): preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1) result = metric.compute(predictions=preds, references=p.label_ids) if len(result) > 1: result["combined_score"] = np.mean(list(result.values())).item() return result # Data collator will default to DataCollatorWithPadding when the tokenizer is passed to Trainer, so we change it if # we already did the padding. 
if data_args.pad_to_max_length: data_collator = default_data_collator elif training_args.fp16: data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) else: data_collator = None # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, ) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) metrics = train_result.metrics max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") # Loop to handle MNLI double evaluation (matched, mis-matched) tasks = [data_args.task_name] eval_datasets = [eval_dataset] if data_args.task_name == "mnli": tasks.append("mnli-mm") valid_mm_dataset = raw_datasets["validation_mismatched"] if data_args.max_eval_samples is not None: max_eval_samples = min(len(valid_mm_dataset), data_args.max_eval_samples) valid_mm_dataset = valid_mm_dataset.select(range(max_eval_samples)) eval_datasets.append(valid_mm_dataset) combined = {} for eval_dataset, task in zip(eval_datasets, tasks): metrics = trainer.evaluate(eval_dataset=eval_dataset) max_eval_samples = ( data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) ) metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) if task == "mnli-mm": metrics = {k + "_mm": v for k, v in metrics.items()} if task is not None and "mnli" in task: combined.update(metrics) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", combined if task is not None and "mnli" in task else metrics) if training_args.do_predict: logger.info("*** Predict ***") # Loop to handle MNLI double evaluation (matched, mis-matched) tasks = [data_args.task_name] predict_datasets = [predict_dataset] if data_args.task_name == "mnli": tasks.append("mnli-mm") predict_datasets.append(raw_datasets["test_mismatched"]) for predict_dataset, task in zip(predict_datasets, tasks): # Removing the `label` columns because it contains -1 and Trainer won't like that. 
predict_dataset = predict_dataset.remove_columns("label") predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions predictions = np.squeeze(predictions) if is_regression else np.argmax(predictions, axis=1) output_predict_file = os.path.join(training_args.output_dir, f"predict_results_{task}.txt") if trainer.is_world_process_zero(): with open(output_predict_file, "w") as writer: logger.info(f"***** Predict results {task} *****") writer.write("index\tprediction\n") for index, item in enumerate(predictions): if is_regression: writer.write(f"{index}\t{item:3.3f}\n") else: item = label_list[item] writer.write(f"{index}\t{item}\n") kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"} if data_args.task_name is not None: kwargs["language"] = "en" kwargs["dataset_tags"] = "glue" kwargs["dataset_args"] = data_args.task_name kwargs["dataset"] = f"GLUE {data_args.task_name.upper()}" if training_args.push_to_hub: trainer.push_to_hub(**kwargs) else: trainer.create_model_card(**kwargs) def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
transformers/examples/pytorch/text-classification/run_glue.py/0
{ "file_path": "transformers/examples/pytorch/text-classification/run_glue.py", "repo_id": "transformers", "token_count": 11739 }
299
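The run_glue.py script above wires its building blocks together through HfArgumentParser and a Trainer; the following is a minimal sketch of that fine-tuning flow condensed to its core calls (load_dataset, tokenize, Trainer, evaluate). The checkpoint name, the MRPC task choice, the output directory and the hyper-parameters are illustrative assumptions, not values taken from the script itself.

# Minimal GLUE fine-tuning sketch mirroring the flow of run_glue.py above.
# Checkpoint, task and hyper-parameters are assumptions chosen for illustration.
import numpy as np
import evaluate
from datasets import load_dataset
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    Trainer,
    TrainingArguments,
)

raw = load_dataset("nyu-mll/glue", "mrpc")
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")


def tokenize(batch):
    # MRPC is a sentence-pair task, matching task_to_keys["mrpc"] in the script above.
    return tokenizer(batch["sentence1"], batch["sentence2"], truncation=True, max_length=128)


tokenized = raw.map(tokenize, batched=True)
model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)
metric = evaluate.load("glue", "mrpc")


def compute_metrics(eval_pred):
    # eval_pred is an EvalPrediction with `predictions` (logits) and `label_ids`.
    preds = np.argmax(eval_pred.predictions, axis=1)
    return metric.compute(predictions=preds, references=eval_pred.label_ids)


trainer = Trainer(
    model=model,
    args=TrainingArguments(output_dir="mrpc-out", num_train_epochs=3, per_device_train_batch_size=16),
    train_dataset=tokenized["train"],
    eval_dataset=tokenized["validation"],
    tokenizer=tokenizer,
    compute_metrics=compute_metrics,
)
trainer.train()
print(trainer.evaluate())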
#!/usr/bin/env python # coding=utf-8 # Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning a 🤗 Transformers model on text translation. """ # You can also adapt this script on your own text translation task. Pointers for this are left as comments. import argparse import json import logging import math import os import random from pathlib import Path import datasets import evaluate import numpy as np import torch from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from datasets import load_dataset from huggingface_hub import HfApi from torch.utils.data import DataLoader from tqdm.auto import tqdm import transformers from transformers import ( CONFIG_MAPPING, MODEL_MAPPING, AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer, DataCollatorForSeq2Seq, MBartTokenizer, MBartTokenizerFast, SchedulerType, default_data_collator, get_scheduler, ) from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.40.0.dev0") logger = get_logger(__name__) require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/translation/requirements.txt") # You should update this to your particular problem to have better documentation of `model_type` MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) # Parsing input arguments def parse_args(): parser = argparse.ArgumentParser(description="Finetune a transformers model on a text classification task") parser.add_argument( "--dataset_name", type=str, default=None, help="The name of the dataset to use (via the datasets library).", ) parser.add_argument( "--predict_with_generate", type=bool, default=True, help="", ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The configuration name of the dataset to use (via the datasets library).", ) parser.add_argument( "--train_file", type=str, default=None, help="A csv or a json file containing the training data." ) parser.add_argument( "--num_beams", type=int, default=None, help=( "Number of beams to use for evaluation. This argument will be " "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``." ), ) parser.add_argument( "--max_source_length", type=int, default=1024, help=( "The maximum total input sequence length after " "tokenization.Sequences longer than this will be truncated, sequences shorter will be padded." ), ) parser.add_argument( "--max_target_length", type=int, default=128, help=( "The maximum total sequence length for target text after " "tokenization. Sequences longer than this will be truncated, sequences shorter will be padded " "during ``evaluate`` and ``predict``." 
), ) parser.add_argument( "--val_max_target_length", type=int, default=None, help=( "The maximum total sequence length for validation " "target text after tokenization.Sequences longer than this will be truncated, sequences shorter will be " "padded. Will default to `max_target_length`.This argument is also used to override the ``max_length`` " "param of ``model.generate``, which is used during ``evaluate`` and ``predict``." ), ) parser.add_argument( "--pad_to_max_length", type=bool, default=False, help=( "Whether to pad all samples to model maximum sentence " "length. If False, will pad the samples dynamically when batching to the maximum length in the batch. More " "efficient on GPU but very bad for TPU." ), ) parser.add_argument( "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data." ) parser.add_argument( "--ignore_pad_token_for_loss", type=bool, default=True, help="Whether to ignore the tokens corresponding to padded labels in the loss computation or not.", ) parser.add_argument("--source_lang", type=str, default=None, help="Source language id for translation.") parser.add_argument("--target_lang", type=str, default=None, help="Target language id for translation.") parser.add_argument( "--source_prefix", type=str, default=None, help="A prefix to add before every source text (useful for T5 models).", ) parser.add_argument( "--preprocessing_num_workers", type=int, default=None, help="The number of processes to use for the preprocessing.", ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) parser.add_argument( "--max_length", type=int, default=128, help=( "The maximum total input sequence length after tokenization. Sequences longer than this will be truncated," " sequences shorter will be padded if `--pad_to_max_length` is passed." ), ) parser.add_argument( "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=False, ) parser.add_argument( "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--use_slow_tokenizer", action="store_true", help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).", ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--learning_rate", type=float, default=5e-5, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. 
If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--lr_scheduler_type", type=SchedulerType, default="linear", help="The scheduler type to use.", choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], ) parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--model_type", type=str, default=None, help="Model type to use if training from scratch.", choices=MODEL_TYPES, ) parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument( "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`." ) parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.") parser.add_argument( "--trust_remote_code", type=bool, default=False, help=( "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option " "should only be set to `True` for repositories you trust and in which you have read the code, as it will " "execute code present on the Hub on your local machine." ), ) parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--with_tracking", action="store_true", help="Whether to enable experiment trackers for logging.", ) parser.add_argument( "--report_to", type=str, default="all", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,' ' `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations. ' "Only applicable when `--with_tracking` is passed." ), ) args = parser.parse_args() # Sanity checks if args.dataset_name is None and args.train_file is None and args.validation_file is None: raise ValueError("Need either a task name or a training/validation file.") if args.train_file is not None: extension = args.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if args.validation_file is not None: extension = args.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." if args.push_to_hub: assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed." return args def main(): # Parse the arguments args = parse_args() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_translation_no_trainer", args) # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. 
# If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers # in the environment accelerator = ( Accelerator(log_with=args.report_to, project_dir=args.output_dir) if args.with_tracking else Accelerator() ) # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.push_to_hub: # Retrieve of infer repo_name repo_name = args.hub_model_id if repo_name is None: repo_name = Path(args.output_dir).absolute().name # Create repo and retrieve repo_id api = HfApi() repo_id = api.create_repo(repo_name, exist_ok=True, token=args.hub_token).repo_id with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: if "step_*" not in gitignore: gitignore.write("step_*\n") if "epoch_*" not in gitignore: gitignore.write("epoch_*\n") elif args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) accelerator.wait_for_everyone() # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name) else: data_files = {} if args.train_file is not None: data_files["train"] = args.train_file extension = args.train_file.split(".")[-1] if args.validation_file is not None: data_files["validation"] = args.validation_file extension = args.validation_file.split(".")[-1] raw_datasets = load_dataset(extension, data_files=data_files) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets. # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
if args.config_name: config = AutoConfig.from_pretrained(args.config_name, trust_remote_code=args.trust_remote_code) elif args.model_name_or_path: config = AutoConfig.from_pretrained(args.model_name_or_path, trust_remote_code=args.trust_remote_code) else: config = CONFIG_MAPPING[args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") if args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained( args.tokenizer_name, use_fast=not args.use_slow_tokenizer, trust_remote_code=args.trust_remote_code ) elif args.model_name_or_path: tokenizer = AutoTokenizer.from_pretrained( args.model_name_or_path, use_fast=not args.use_slow_tokenizer, trust_remote_code=args.trust_remote_code ) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script. " "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) if args.model_name_or_path: model = AutoModelForSeq2SeqLM.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, trust_remote_code=args.trust_remote_code, ) else: logger.info("Training new model from scratch") model = AutoModelForSeq2SeqLM.from_config(config, trust_remote_code=args.trust_remote_code) # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch # on a small vocab and want a smaller embedding size, remove this test. embedding_size = model.get_input_embeddings().weight.shape[0] if len(tokenizer) > embedding_size: model.resize_token_embeddings(len(tokenizer)) # Set decoder_start_token_id if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)): assert ( args.target_lang is not None and args.source_lang is not None ), "mBart requires --target_lang and --source_lang" if isinstance(tokenizer, MBartTokenizer): model.config.decoder_start_token_id = tokenizer.lang_code_to_id[args.target_lang] else: model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(args.target_lang) if model.config.decoder_start_token_id is None: raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined") prefix = args.source_prefix if args.source_prefix is not None else "" # Preprocessing the datasets. # First we tokenize all the texts. column_names = raw_datasets["train"].column_names # For translation we set the codes of our source and target languages (only useful for mBART, the others will # ignore those attributes). if isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)): if args.source_lang is not None: tokenizer.src_lang = args.source_lang if args.target_lang is not None: tokenizer.tgt_lang = args.target_lang # Get the language codes for input/target. source_lang = args.source_lang.split("_")[0] target_lang = args.target_lang.split("_")[0] padding = "max_length" if args.pad_to_max_length else False # Temporarily set max_target_length for training. 
max_target_length = args.max_target_length padding = "max_length" if args.pad_to_max_length else False def preprocess_function(examples): inputs = [ex[source_lang] for ex in examples["translation"]] targets = [ex[target_lang] for ex in examples["translation"]] inputs = [prefix + inp for inp in inputs] model_inputs = tokenizer(inputs, max_length=args.max_source_length, padding=padding, truncation=True) # Tokenize targets with the `text_target` keyword argument labels = tokenizer(text_target=targets, max_length=max_target_length, padding=padding, truncation=True) # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore # padding in the loss. if padding == "max_length" and args.ignore_pad_token_for_loss: labels["input_ids"] = [ [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"] ] model_inputs["labels"] = labels["input_ids"] return model_inputs with accelerator.main_process_first(): processed_datasets = raw_datasets.map( preprocess_function, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc="Running tokenizer on dataset", ) train_dataset = processed_datasets["train"] eval_dataset = processed_datasets["validation"] # Log a few random samples from the training set: for index in random.sample(range(len(train_dataset)), 3): logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") # DataLoaders creation: label_pad_token_id = -100 if args.ignore_pad_token_for_loss else tokenizer.pad_token_id if args.pad_to_max_length: # If padding was already done ot max length, we use the default data collator that will just convert everything # to tensors. data_collator = default_data_collator else: # Otherwise, `DataCollatorWithPadding` will apply dynamic padding for us (by padding to the maximum length of # the samples passed). When using mixed precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple # of 8s, which will enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). data_collator = DataCollatorForSeq2Seq( tokenizer, model=model, label_pad_token_id=label_pad_token_id, pad_to_multiple_of=8 if accelerator.use_fp16 else None, ) train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size ) eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size) # Optimizer # Split weights in two groups, one with weight decay and the other not. no_decay = ["bias", "LayerNorm.weight", "layer_norm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay, }, { "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate) # Scheduler and math around the number of training steps. 
overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps, ) # Prepare everything with our `accelerator`. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # Figure out how many steps we should save the Accelerator states checkpointing_steps = args.checkpointing_steps if checkpointing_steps is not None and checkpointing_steps.isdigit(): checkpointing_steps = int(checkpointing_steps) # We need to initialize the trackers we use, and also store our configuration. # We initialize the trackers only on main process because `accelerator.log` # only logs on main process and we don't want empty logs/runs on other processes. if args.with_tracking: if accelerator.is_main_process: experiment_config = vars(args) # TensorBoard cannot log Enums, need the raw value experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value accelerator.init_trackers("translation_no_trainer", experiment_config) metric = evaluate.load("sacrebleu") def postprocess_text(preds, labels): preds = [pred.strip() for pred in preds] labels = [[label.strip()] for label in labels] return preds, labels # Train! total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") # Only show the progress bar once on each machine. 
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) completed_steps = 0 starting_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": checkpoint_path = args.resume_from_checkpoint path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()] dirs.sort(key=os.path.getctime) path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last checkpoint_path = path path = os.path.basename(checkpoint_path) accelerator.print(f"Resumed from checkpoint: {checkpoint_path}") accelerator.load_state(checkpoint_path) # Extract `epoch_{i}` or `step_{i}` training_difference = os.path.splitext(path)[0] if "epoch" in training_difference: starting_epoch = int(training_difference.replace("epoch_", "")) + 1 resume_step = None completed_steps = starting_epoch * num_update_steps_per_epoch else: # need to multiply `gradient_accumulation_steps` to reflect real steps resume_step = int(training_difference.replace("step_", "")) * args.gradient_accumulation_steps starting_epoch = resume_step // len(train_dataloader) completed_steps = resume_step // args.gradient_accumulation_steps resume_step -= starting_epoch * len(train_dataloader) # update the progress_bar if load from checkpoint progress_bar.update(completed_steps) for epoch in range(starting_epoch, args.num_train_epochs): model.train() if args.with_tracking: total_loss = 0 if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We skip the first `n` batches in the dataloader when resuming from a checkpoint active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step) else: active_dataloader = train_dataloader for step, batch in enumerate(active_dataloader): outputs = model(**batch) loss = outputs.loss # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() loss = loss / args.gradient_accumulation_steps accelerator.backward(loss) if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1: optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1) completed_steps += 1 if isinstance(checkpointing_steps, int): if completed_steps % checkpointing_steps == 0: output_dir = f"step_{completed_steps}" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) if completed_steps >= args.max_train_steps: break model.eval() if args.val_max_target_length is None: args.val_max_target_length = args.max_target_length gen_kwargs = { "max_length": args.val_max_target_length if args is not None else config.max_length, "num_beams": args.num_beams, } samples_seen = 0 for step, batch in enumerate(eval_dataloader): with torch.no_grad(): generated_tokens = accelerator.unwrap_model(model).generate( batch["input_ids"], attention_mask=batch["attention_mask"], **gen_kwargs, ) generated_tokens = accelerator.pad_across_processes( generated_tokens, dim=1, pad_index=tokenizer.pad_token_id ) labels = batch["labels"] if not args.pad_to_max_length: # If we did not pad to max length, we need to pad the labels too labels = accelerator.pad_across_processes(batch["labels"], dim=1, pad_index=tokenizer.pad_token_id) generated_tokens = accelerator.gather(generated_tokens).cpu().numpy() labels = 
accelerator.gather(labels).cpu().numpy() if args.ignore_pad_token_for_loss: # Replace -100 in the labels as we can't decode them. labels = np.where(labels != -100, labels, tokenizer.pad_token_id) decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.num_processes > 1: if step == len(eval_dataloader) - 1: decoded_preds = decoded_preds[: len(eval_dataloader.dataset) - samples_seen] decoded_labels = decoded_labels[: len(eval_dataloader.dataset) - samples_seen] else: samples_seen += len(decoded_labels) metric.add_batch(predictions=decoded_preds, references=decoded_labels) eval_metric = metric.compute() logger.info({"bleu": eval_metric["score"]}) if args.with_tracking: accelerator.log( { "bleu": eval_metric["score"], "train_loss": total_loss.item() / len(train_dataloader), "epoch": epoch, "step": completed_steps, }, step=completed_steps, ) if args.push_to_hub and epoch < args.num_train_epochs - 1: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save ) if accelerator.is_main_process: tokenizer.save_pretrained(args.output_dir) api.upload_folder( commit_message=f"Training in progress epoch {epoch}", folder_path=args.output_dir, repo_id=repo_id, repo_type="model", token=args.hub_token, ) if args.checkpointing_steps == "epoch": output_dir = f"epoch_{epoch}" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) if args.with_tracking: accelerator.end_training() if args.output_dir is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save ) if accelerator.is_main_process: tokenizer.save_pretrained(args.output_dir) if args.push_to_hub: api.upload_folder( commit_message="End of training", folder_path=args.output_dir, repo_id=repo_id, repo_type="model", token=args.hub_token, ) with open(os.path.join(args.output_dir, "all_results.json"), "w") as f: json.dump({"eval_bleu": eval_metric["score"]}, f) if __name__ == "__main__": main()
transformers/examples/pytorch/translation/run_translation_no_trainer.py/0
{ "file_path": "transformers/examples/pytorch/translation/run_translation_no_trainer.py", "repo_id": "transformers", "token_count": 13757 }
300
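The no-trainer translation script above delegates device placement, distributed execution and mixed precision to 🤗 Accelerate; the sketch below isolates that Accelerator pattern (prepare, backward, step) on a toy model so the control flow is visible without downloading a seq2seq model or dataset. The toy linear model, the random data and the hyper-parameters are assumptions made for illustration only, not part of the original script.

# Minimal Accelerate training-loop sketch following the pattern used above.
# The model and data are toy stand-ins; launch with `accelerate launch` for multi-GPU runs.
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator()

# Toy regression data and model stand in for the seq2seq model and tokenized dataset above.
inputs = torch.randn(256, 16)
targets = torch.randn(256, 1)
dataloader = DataLoader(TensorDataset(inputs, targets), batch_size=32, shuffle=True)
model = torch.nn.Linear(16, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)

# accelerator.prepare wraps model, optimizer and dataloader for the current device and,
# when launched through `accelerate launch`, for distributed execution.
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for epoch in range(3):
    model.train()
    for batch_inputs, batch_targets in dataloader:
        predictions = model(batch_inputs)
        loss = torch.nn.functional.mse_loss(predictions, batch_targets)
        accelerator.backward(loss)  # replaces loss.backward(); handles grad scaling for mixed precision
        optimizer.step()
        optimizer.zero_grad()
    accelerator.print(f"epoch {epoch}: last batch loss {loss.item():.4f}")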
# coding=utf-8
# Copyright 2019 The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" BertAbs configuration """
import logging

from transformers import PretrainedConfig


logger = logging.getLogger(__name__)


BERTABS_FINETUNED_CONFIG_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}


class BertAbsConfig(PretrainedConfig):
    r"""Class to store the configuration of the BertAbs model.

    Arguments:
        vocab_size: int
            Number of tokens in the vocabulary.
        max_pos: int
            The maximum sequence length that this model will be used with.
        enc_layers: int
            The number of hidden layers in the Transformer encoder.
        enc_hidden_size: int
            The size of the encoder's layers.
        enc_heads: int
            The number of attention heads for each attention layer in the encoder.
        enc_ff_size: int
            The size of the encoder's feed-forward layers.
        enc_dropout: int
            The dropout probability for all fully connected layers in the
            embeddings, layers, pooler and also the attention probabilities in
            the encoder.
        dec_layers: int
            The number of hidden layers in the decoder.
        dec_hidden_size: int
            The size of the decoder's layers.
        dec_heads: int
            The number of attention heads for each attention layer in the decoder.
        dec_ff_size: int
            The size of the decoder's feed-forward layers.
        dec_dropout: int
            The dropout probability for all fully connected layers in the
            embeddings, layers, pooler and also the attention probabilities in
            the decoder.
    """

    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos

        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout

        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
transformers/examples/research_projects/bertabs/configuration_bertabs.py/0
{ "file_path": "transformers/examples/research_projects/bertabs/configuration_bertabs.py", "repo_id": "transformers", "token_count": 1345 }
301
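Because BertAbsConfig subclasses PretrainedConfig, it inherits keyword overrides and (de)serialization for free; the short sketch below shows that usage. It assumes the module above is importable as configuration_bertabs and uses an arbitrary local directory name chosen for illustration.

# Minimal usage sketch for the BertAbsConfig class defined above.
from configuration_bertabs import BertAbsConfig  # assumes the module above is on the Python path

# Defaults mirror the __init__ signature above; any field can be overridden by keyword.
config = BertAbsConfig(dec_layers=8, dec_dropout=0.1)
print(config.enc_hidden_size, config.dec_layers)

# PretrainedConfig provides JSON (de)serialization.
config.save_pretrained("./bertabs-config")
reloaded = BertAbsConfig.from_pretrained("./bertabs-config")
assert reloaded.dec_layers == 8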
from arguments import TokenizerTrainingArguments from datasets import load_dataset from tqdm import tqdm from transformers import AutoTokenizer, HfArgumentParser from transformers.models.gpt2.tokenization_gpt2 import bytes_to_unicode # Iterator for Training def batch_iterator(batch_size=10): for _ in tqdm(range(0, args.n_examples, batch_size)): yield [next(iter_dataset)[args.text_column] for _ in range(batch_size)] # Configuration parser = HfArgumentParser(TokenizerTrainingArguments) args = parser.parse_args() # Base tokenizer tokenizer = AutoTokenizer.from_pretrained(args.base_tokenizer) base_vocab = list(bytes_to_unicode().values()) # Load dataset dataset = load_dataset(args.dataset_name, split="train", streaming=True) iter_dataset = iter(dataset) # Training and saving new_tokenizer = tokenizer.train_new_from_iterator( batch_iterator(), vocab_size=args.vocab_size, initial_alphabet=base_vocab ) new_tokenizer.save_pretrained(args.tokenizer_name, push_to_hub=args.push_to_hub)
transformers/examples/research_projects/codeparrot/scripts/bpe_training.py/0
{ "file_path": "transformers/examples/research_projects/codeparrot/scripts/bpe_training.py", "repo_id": "transformers", "token_count": 347 }
302
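The script above streams a dataset and retrains GPT-2's byte-level BPE tokenizer with train_new_from_iterator; the sketch below makes the same call on a tiny in-memory corpus so it runs without argument parsing or a remote dataset. The corpus strings, the vocabulary size and the output directory are illustrative assumptions.

# Minimal sketch of retraining a GPT-2 tokenizer from an iterator, as in bpe_training.py above.
from transformers import AutoTokenizer
from transformers.models.gpt2.tokenization_gpt2 import bytes_to_unicode

base_tokenizer = AutoTokenizer.from_pretrained("gpt2")
base_vocab = list(bytes_to_unicode().values())

# A tiny in-memory corpus stands in for the streamed dataset used in the script.
corpus = [
    "def add(a, b):\n    return a + b",
    "for i in range(10):\n    print(i)",
    "class Greeter:\n    def greet(self):\n        return 'hello'",
]


def batch_iterator(batch_size=2):
    for start in range(0, len(corpus), batch_size):
        yield corpus[start : start + batch_size]


new_tokenizer = base_tokenizer.train_new_from_iterator(
    batch_iterator(), vocab_size=1000, initial_alphabet=base_vocab
)
print(new_tokenizer.tokenize("def greet():"))
new_tokenizer.save_pretrained("./code-tokenizer")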
from __future__ import absolute_import, division, print_function import argparse import glob import logging import os import random import time import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange import transformers from src.modeling_highway_bert import DeeBertForSequenceClassification from src.modeling_highway_roberta import DeeRobertaForSequenceClassification from transformers import ( WEIGHTS_NAME, AdamW, BertConfig, BertTokenizer, RobertaConfig, RobertaTokenizer, get_linear_schedule_with_warmup, ) from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes as output_modes from transformers import glue_processors as processors from transformers.trainer_utils import is_main_process try: from torch.utils.tensorboard import SummaryWriter except ImportError: from tensorboardX import SummaryWriter logger = logging.getLogger(__name__) MODEL_CLASSES = { "bert": (BertConfig, DeeBertForSequenceClassification, BertTokenizer), "roberta": (RobertaConfig, DeeRobertaForSequenceClassification, RobertaTokenizer), } def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def get_wanted_result(result): if "spearmanr" in result: print_result = result["spearmanr"] elif "f1" in result: print_result = result["f1"] elif "mcc" in result: print_result = result["mcc"] elif "acc" in result: print_result = result["acc"] else: raise ValueError("Primary metric unclear in the results") return print_result def train(args, train_dataset, model, tokenizer, train_highway=False): """Train the model""" if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ["bias", "LayerNorm.weight"] if train_highway: optimizer_grouped_parameters = [ { "params": [ p for n, p in model.named_parameters() if ("highway" in n) and (not any(nd in n for nd in no_decay)) ], "weight_decay": args.weight_decay, }, { "params": [ p for n, p in model.named_parameters() if ("highway" in n) and (any(nd in n for nd in no_decay)) ], "weight_decay": 0.0, }, ] else: optimizer_grouped_parameters = [ { "params": [ p for n, p in model.named_parameters() if ("highway" not in n) and (not any(nd in n for nd in no_decay)) ], "weight_decay": args.weight_decay, }, { "params": [ p for n, p in model.named_parameters() if ("highway" not in n) and (any(nd in n for nd in no_decay)) ], "weight_decay": 0.0, }, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total ) if args.fp16: try: from apex import amp 
except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True ) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info( " Total train batch size (w. parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), ) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 0 tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) set_seed(args) # Added here for reproducibility (even between python 2 and 3) for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): model.train() batch = tuple(t.to(args.device) for t in batch) inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if args.model_type != "distilbert": inputs["token_type_ids"] = ( batch[2] if args.model_type in ["bert", "xlnet"] else None ) # XLM, DistilBERT and RoBERTa don't use segment_ids inputs["train_highway"] = train_highway outputs = model(**inputs) loss = outputs[0] # model outputs are always tuple in transformers (see doc) if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Log metrics if ( args.local_rank == -1 and args.evaluate_during_training ): # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args, model, tokenizer) for key, value in results.items(): tb_writer.add_scalar("eval_{}".format(key), value, global_step) tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step) tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step) logging_loss = tr_loss if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: # Save model checkpoint output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = ( model.module if hasattr(model, "module") 
else model ) # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, "training_args.bin")) logger.info("Saving model checkpoint to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate(args, model, tokenizer, prefix="", output_layer=-1, eval_highway=False): # Loop to handle MNLI double evaluation (matched, mis-matched) eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,) eval_outputs_dirs = (args.output_dir, args.output_dir + "-MM") if args.task_name == "mnli" else (args.output_dir,) results = {} for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs): eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True) if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: os.makedirs(eval_output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # multi-gpu eval if args.n_gpu > 1: model = nn.DataParallel(model) # Eval! logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(eval_dataset)) logger.info(" Batch size = %d", args.eval_batch_size) eval_loss = 0.0 nb_eval_steps = 0 preds = None out_label_ids = None exit_layer_counter = {(i + 1): 0 for i in range(model.num_layers)} st = time.time() for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if args.model_type != "distilbert": inputs["token_type_ids"] = ( batch[2] if args.model_type in ["bert", "xlnet"] else None ) # XLM, DistilBERT and RoBERTa don't use segment_ids if output_layer >= 0: inputs["output_layer"] = output_layer outputs = model(**inputs) if eval_highway: exit_layer_counter[outputs[-1]] += 1 tmp_eval_loss, logits = outputs[:2] eval_loss += tmp_eval_loss.mean().item() nb_eval_steps += 1 if preds is None: preds = logits.detach().cpu().numpy() out_label_ids = inputs["labels"].detach().cpu().numpy() else: preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0) eval_time = time.time() - st logger.info("Eval time: {}".format(eval_time)) eval_loss = eval_loss / nb_eval_steps if args.output_mode == "classification": preds = np.argmax(preds, axis=1) elif args.output_mode == "regression": preds = np.squeeze(preds) result = compute_metrics(eval_task, preds, out_label_ids) results.update(result) if eval_highway: logger.info("Exit layer counter: {}".format(exit_layer_counter)) actual_cost = sum([l * c for l, c in exit_layer_counter.items()]) full_cost = len(eval_dataloader) * model.num_layers logger.info("Expected saving: {}".format(actual_cost / full_cost)) if args.early_exit_entropy >= 0: save_fname = ( args.plot_data_dir + "/" + args.model_name_or_path[2:] + "/entropy_{}.npy".format(args.early_exit_entropy) ) if not os.path.exists(os.path.dirname(save_fname)): 
os.makedirs(os.path.dirname(save_fname)) print_result = get_wanted_result(result) np.save(save_fname, np.array([exit_layer_counter, eval_time, actual_cost / full_cost, print_result])) logger.info("Entropy={}\tResult={:.2f}".format(args.early_exit_entropy, 100 * print_result)) output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results {} *****".format(prefix)) for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) return results def load_and_cache_examples(args, task, tokenizer, evaluate=False): if args.local_rank not in [-1, 0] and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache processor = processors[task]() output_mode = output_modes[task] # Load data features from cache or dataset file cached_features_file = os.path.join( args.data_dir, "cached_{}_{}_{}_{}".format( "dev" if evaluate else "train", list(filter(None, args.model_name_or_path.split("/"))).pop(), str(args.max_seq_length), str(task), ), ) if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) else: logger.info("Creating features from dataset file at %s", args.data_dir) label_list = processor.get_labels() if task in ["mnli", "mnli-mm"] and args.model_type in ["roberta"]: # HACK(label indices are swapped in RoBERTa pretrained model) label_list[1], label_list[2] = label_list[2], label_list[1] examples = ( processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir) ) features = convert_examples_to_features( examples, tokenizer, label_list=label_list, max_length=args.max_seq_length, output_mode=output_mode, ) if args.local_rank in [-1, 0]: logger.info("Saving features into cached file %s", cached_features_file) torch.save(features, cached_features_file) if args.local_rank == 0 and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Convert to Tensors and build dataset all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long) if features[0].token_type_ids is None: # For RoBERTa (a potential bug!) all_token_type_ids = torch.tensor([[0] * args.max_seq_length for f in features], dtype=torch.long) else: all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long) if output_mode == "classification": all_labels = torch.tensor([f.label for f in features], dtype=torch.long) elif output_mode == "regression": all_labels = torch.tensor([f.label for f in features], dtype=torch.float) dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels) return dataset def main(): parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--data_dir", default=None, type=str, required=True, help="The input data dir. 
Should contain the .tsv files (or other data files) for the task.", ) parser.add_argument( "--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()), ) parser.add_argument( "--model_name_or_path", default=None, type=str, required=True, help="Path to pre-trained model or shortcut name.", ) parser.add_argument( "--task_name", default=None, type=str, required=True, help="The name of the task to train selected in the list: " + ", ".join(processors.keys()), ) parser.add_argument( "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument( "--plot_data_dir", default="./plotting/", type=str, required=False, help="The directory to store data for plotting figures.", ) # Other parameters parser.add_argument( "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name" ) parser.add_argument( "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from huggingface.co", ) parser.add_argument( "--max_seq_length", default=128, type=int, help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ), ) parser.add_argument("--do_train", action="store_true", help="Whether to run training.") parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.") parser.add_argument( "--evaluate_during_training", action="store_true", help="Rul evaluation during training at each logging step." ) parser.add_argument( "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model." ) parser.add_argument("--eval_each_highway", action="store_true", help="Set this flag to evaluate each highway.") parser.add_argument( "--eval_after_first_stage", action="store_true", help="Set this flag to evaluate after training only bert (not highway).", ) parser.add_argument("--eval_highway", action="store_true", help="Set this flag if it's evaluating highway models") parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument( "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation." ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument( "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform." ) parser.add_argument( "--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. 
Override num_train_epochs.", ) parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument("--early_exit_entropy", default=-1, type=float, help="Entropy threshold for early exit.") parser.add_argument("--logging_steps", type=int, default=50, help="Log every X updates steps.") parser.add_argument("--save_steps", type=int, default=50, help="Save checkpoint every X updates steps.") parser.add_argument( "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", ) parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available") parser.add_argument( "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory" ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") parser.add_argument( "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", ) parser.add_argument( "--fp16_opt_level", type=str, default="O1", help=( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. " "See details at https://nvidia.github.io/apex/amp.html" ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.") parser.add_argument("--server_port", type=str, default="", help="For distant debugging.") args = parser.parse_args() if ( os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir ): raise ValueError( "Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format( args.output_dir ) ) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend="nccl") args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16, ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(args.local_rank): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set seed set_seed(args) # Prepare GLUE task args.task_name = args.task_name.lower() if args.task_name not in processors: raise ValueError("Task not found: %s" % (args.task_name)) processor = processors[args.task_name]() args.output_mode = output_modes[args.task_name] label_list = processor.get_labels() num_labels = len(label_list) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name, cache_dir=args.cache_dir if args.cache_dir else None, ) tokenizer = tokenizer_class.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None, ) model = model_class.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None, ) if args.model_type == "bert": model.bert.encoder.set_early_exit_entropy(args.early_exit_entropy) model.bert.init_highway_pooler() elif args.model_type == "roberta": model.roberta.encoder.set_early_exit_entropy(args.early_exit_entropy) model.roberta.init_highway_pooler() else: raise NotImplementedError() if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Training if args.do_train: train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False) global_step, tr_loss = train(args, train_dataset, model, tokenizer) logger.info(" global_step = %s, average loss = %s", 
global_step, tr_loss) if args.eval_after_first_stage: result = evaluate(args, model, tokenizer, prefix="") print_result = get_wanted_result(result) train(args, train_dataset, model, tokenizer, train_highway=True) # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained() if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): # Create output directory if needed if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. # They can then be reloaded using `from_pretrained()` model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, "training_args.bin")) # Load a trained model and vocabulary that you have fine-tuned model = model_class.from_pretrained(args.output_dir) tokenizer = tokenizer_class.from_pretrained(args.output_dir) model.to(args.device) # Evaluation results = {} if args.do_eval and args.local_rank in [-1, 0]: tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = [ os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)) ] logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else "" prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else "" model = model_class.from_pretrained(checkpoint) if args.model_type == "bert": model.bert.encoder.set_early_exit_entropy(args.early_exit_entropy) elif args.model_type == "roberta": model.roberta.encoder.set_early_exit_entropy(args.early_exit_entropy) else: raise NotImplementedError() model.to(args.device) result = evaluate(args, model, tokenizer, prefix=prefix, eval_highway=args.eval_highway) print_result = get_wanted_result(result) logger.info("Result: {}".format(print_result)) if args.eval_each_highway: last_layer_results = print_result each_layer_results = [] for i in range(model.num_layers): logger.info("\n") _result = evaluate( args, model, tokenizer, prefix=prefix, output_layer=i, eval_highway=args.eval_highway ) if i + 1 < model.num_layers: each_layer_results.append(get_wanted_result(_result)) each_layer_results.append(last_layer_results) save_fname = args.plot_data_dir + "/" + args.model_name_or_path[2:] + "/each_layer.npy" if not os.path.exists(os.path.dirname(save_fname)): os.makedirs(os.path.dirname(save_fname)) np.save(save_fname, np.array(each_layer_results)) info_str = "Score of each layer:" for i in range(model.num_layers): info_str += " {:.2f}".format(100 * each_layer_results[i]) logger.info(info_str) result = {k + "_{}".format(global_step): v for k, v in result.items()} results.update(result) return results if __name__ == "__main__": main()
transformers/examples/research_projects/deebert/run_glue_deebert.py/0
{ "file_path": "transformers/examples/research_projects/deebert/run_glue_deebert.py", "repo_id": "transformers", "token_count": 13897 }
303
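The `run_glue_deebert.py` script above gates DeeBERT's early exits on an entropy threshold (`--early_exit_entropy`) and tallies where examples leave the network in `exit_layer_counter`. As a rough, self-contained sketch of that decision rule only — the helper names below are illustrative, not part of the script or of the model classes:

```python
import torch
import torch.nn.functional as F


def prediction_entropy(logits: torch.Tensor) -> torch.Tensor:
    """Shannon entropy of the softmax distribution for each example in the batch."""
    log_probs = F.log_softmax(logits, dim=-1)
    return -(log_probs.exp() * log_probs).sum(dim=-1)


def should_exit_early(logits: torch.Tensor, entropy_threshold: float) -> torch.Tensor:
    """True where the intermediate (highway) classifier is confident enough to stop."""
    return prediction_entropy(logits) < entropy_threshold


# A sharply peaked distribution exits early; a near-uniform one keeps going.
confident = torch.tensor([[8.0, -4.0, -4.0]])
uncertain = torch.tensor([[0.1, 0.0, -0.1]])
print(should_exit_early(confident, entropy_threshold=0.5))  # tensor([True])
print(should_exit_early(uncertain, entropy_threshold=0.5))  # tensor([False])
```

Lower thresholds mean fewer examples exit early, trading speed for accuracy — exactly the trade-off the `--early_exit_entropy` sweep in the evaluation code measures.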
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Training the distilled model. Supported architectures include: BERT -> DistilBERT, RoBERTa -> DistilRoBERTa, GPT2 -> DistilGPT2. """ import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPT2Config, GPT2LMHeadModel, GPT2Tokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed MODEL_CLASSES = { "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), "bert": (BertConfig, BertForMaskedLM, BertTokenizer), "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer), } def sanity_checks(args): """ A bunch of args sanity checks to perform even starting... """ assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def freeze_pos_embeddings(student, args): if args.student_type == "roberta": student.roberta.embeddings.position_embeddings.weight.requires_grad = False elif args.student_type == "gpt2": student.transformer.wpe.weight.requires_grad = False def freeze_token_type_embeddings(student, args): if args.student_type == "roberta": student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False def main(): parser = argparse.ArgumentParser(description="Training") parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.") parser.add_argument( "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)" ) parser.add_argument( "--data_file", type=str, required=True, help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.", ) parser.add_argument( "--student_type", type=str, choices=["distilbert", "roberta", "gpt2"], required=True, help="The student 
type (DistilBERT, RoBERTa).", ) parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.") parser.add_argument( "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint." ) parser.add_argument( "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)." ) parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.") parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.") parser.add_argument( "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0." ) parser.add_argument( "--alpha_mlm", default=0.0, type=float, help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.", ) parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.") parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.") parser.add_argument( "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0." ) parser.add_argument( "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." ) parser.add_argument( "--mlm_mask_prop", default=0.15, type=float, help="Proportion of tokens for which we need to make a prediction.", ) parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.") parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.") parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.") parser.add_argument( "--mlm_smoothing", default=0.7, type=float, help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).", ) parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.") parser.add_argument( "--restrict_ce_to_mask", action="store_true", help="If true, compute the distillation loss only the [MLM] prediction distribution.", ) parser.add_argument( "--freeze_pos_embs", action="store_true", help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.", ) parser.add_argument( "--freeze_token_type_embds", action="store_true", help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.", ) parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.") parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).") parser.add_argument( "--group_by_size", action="store_false", help="If true, group sequences that have similar length into the same batch. 
Default is true.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=50, help="Gradient accumulation for larger training batches.", ) parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.") parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.") parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.") parser.add_argument( "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", ) parser.add_argument( "--fp16_opt_level", type=str, default="O1", help=( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. " "See details at https://nvidia.github.io/apex/amp.html" ), ) parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.") parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank") parser.add_argument("--seed", type=int, default=56, help="Random seed") parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.") parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.") args = parser.parse_args() sanity_checks(args) # ARGS # init_gpu_params(args) set_seed(args) if args.is_master: if os.path.exists(args.dump_path): if not args.force: raise ValueError( f"Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite" " itUse `--force` if you want to overwrite it" ) else: shutil.rmtree(args.dump_path) if not os.path.exists(args.dump_path): os.makedirs(args.dump_path) logger.info(f"Experiment will be dumped and logged in {args.dump_path}") # SAVE PARAMS # logger.info(f"Param: {args}") with open(os.path.join(args.dump_path, "parameters.json"), "w") as f: json.dump(vars(args), f, indent=4) git_log(args.dump_path) student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type] teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type] # TOKENIZER # tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name) special_tok_ids = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): idx = tokenizer.all_special_tokens.index(tok_symbol) special_tok_ids[tok_name] = tokenizer.all_special_ids[idx] logger.info(f"Special tokens {special_tok_ids}") args.special_tok_ids = special_tok_ids args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(f"Loading data from {args.data_file}") with open(args.data_file, "rb") as fp: data = pickle.load(fp) if args.mlm: logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)") with open(args.token_counts, "rb") as fp: counts = pickle.load(fp) token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing for idx in special_tok_ids.values(): token_probs[idx] = 0.0 # do not predict special tokens token_probs = torch.from_numpy(token_probs) else: token_probs = None train_lm_seq_dataset = LmSeqsDataset(params=args, data=data) logger.info("Data loader created.") # STUDENT # logger.info(f"Loading student config 
from {args.student_config}") stu_architecture_config = student_config_class.from_pretrained(args.student_config) stu_architecture_config.output_hidden_states = True if args.student_pretrained_weights is not None: logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}") student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config) else: student = student_model_class(stu_architecture_config) if args.n_gpu > 0: student.to(f"cuda:{args.local_rank}") logger.info("Student loaded.") # TEACHER # teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True) if args.n_gpu > 0: teacher.to(f"cuda:{args.local_rank}") logger.info(f"Teacher loaded from {args.teacher_name}.") # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(student, args) if args.freeze_token_type_embds: freeze_token_type_embeddings(student, args) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() distiller = Distiller( params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher ) distiller.train() logger.info("Let's go get some drinks.") if __name__ == "__main__": main()
transformers/examples/research_projects/distillation/train.py/0
{ "file_path": "transformers/examples/research_projects/distillation/train.py", "repo_id": "transformers", "token_count": 5147 }
304
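`train.py` above mixes several objectives (`--alpha_ce`, `--alpha_mlm`/`--alpha_clm`, `--alpha_mse`, `--alpha_cos`) with a softmax `--temperature`; the actual loss computation lives in the `Distiller` class, which is not shown here. Purely as an illustration of the standard temperature-scaled distillation term such a setup relies on (function and tensor names are assumptions, not the project's API):

```python
import torch
import torch.nn.functional as F


def soft_target_loss(
    student_logits: torch.Tensor, teacher_logits: torch.Tensor, temperature: float = 2.0
) -> torch.Tensor:
    """KL divergence between temperature-softened teacher and student distributions.

    The temperature**2 factor keeps gradient magnitudes comparable across temperatures,
    as in Hinton et al., "Distilling the Knowledge in a Neural Network".
    """
    student_log_probs = F.log_softmax(student_logits / temperature, dim=-1)
    teacher_probs = F.softmax(teacher_logits / temperature, dim=-1)
    return F.kl_div(student_log_probs, teacher_probs, reduction="batchmean") * temperature**2


# Identical logits give a (near-)zero loss; diverging logits increase it.
student = torch.randn(4, 100)
print(soft_target_loss(student, student.clone()).item())          # ~0.0
print(soft_target_loss(student, torch.randn(4, 100)).item() > 0)  # True
```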
# Copyright 2022 - Intel Corp. All rights reserved. # Authors: Mayank Kumar Raunak, Javier Turek, Nicole Backage import copy import logging import random import joblib import numpy as np import torch import torch.nn as nn from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AdamW, GPT2LMHeadModel, get_linear_schedule_with_warmup logger = logging.getLogger(__name__) def set_seed(seed): """ For reproducible training Args: seed: A seed for reproducible training """ random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) def compute_perplexity(model, test_data, context_len): """ Computes perplexity of the transformer model on data in test_data Args: model: Pre-trained GPT2 model test_data: Data on which perplexity calculation is required context_len: The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded Returns: Perplexity on input test data """ model.eval() device = next(model.parameters()).device eval_batch_size = 1 context = torch.zeros((eval_batch_size, context_len), dtype=torch.long, device=device) eval_dataloader = DataLoader(test_data, shuffle=False, batch_size=eval_batch_size) eval_loss = torch.zeros(1, device=device) nb_eval_examples = 0 for batch in eval_dataloader: batch.to(device) # pad context.zero_() for i in range(eval_batch_size): context[i, :] = batch[i] outputs = model(context, labels=context) eval_loss += outputs[0].sum().item() nb_eval_examples += batch.size(0) eval_loss = eval_loss / nb_eval_examples perplexity = torch.exp(eval_loss) model.train() return perplexity def load_gpt2(model_name="openai-community/gpt2"): """ load original openai-community/gpt2 and save off for quicker loading Args: model_name: GPT-2 Returns: GPT-2 model """ model = GPT2LMHeadModel.from_pretrained(model_name, output_hidden_states=True) torch.save(model.state_dict(), model_name + "local.pt") return model def recopy_gpt2(orig_model, device, max_steps): """ Reset the model to the original pretrained GPT-2 weights after each iteration Args: orig_model: Original pretrained GPT-2 model imported from Transformers library device: CPU/GPU max_steps: number of training steps Returns: Original PreTrained GPT-2 model, lm_optimizer: Adam optimizer with Decoupled weight decay lm_scheduler: linear scheduler with the appropriate schedule """ model = copy.deepcopy(orig_model) model.to(device) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}, ] lm_optimizer = AdamW(optimizer_grouped_parameters, lr=5e-5, eps=1e-8) lm_scheduler = get_linear_schedule_with_warmup(lm_optimizer, 0, max_steps) torch.cuda.empty_cache() return model, lm_optimizer, lm_scheduler def intermittent_save(contexts, real_perps, past_perps, filename): """ save the perplexity differences to filename Args: contexts: Example on which the perplexity is calculated real_perps: Perplexity after back-propagating on the selected context past_perps: Perplexity of model before training on the context filename: File to store perplexity differences Returns: file with perplexity differences """ # save the perplexity differences to filename avg = np.array(real_perps).mean() std = np.array(real_perps).std() perp_diff = (real_perps - avg) / std data_final = 
list(zip(contexts, perp_diff, past_perps)) joblib.dump(data_final, filename) def collect_objective_set( model, orig_perp, context_len, train_data, objective_set, max_steps, device, filename="dev.jbl", recopy_model=recopy_gpt2, ): """ Collect individual IGF values from pre-trained transformer model max_steps samples of training data to train secondary model Args: model: Pre-trained GPT2 model orig_perp: Perplexity of original pretrained GPT-2 model context_len: The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded train_data: Data to train model objective_set: Contexts used to create (X,IG(X)) pairs which is the training data for secondary learner max_steps: To calculate training epochs of model device: GPU/CPU filename: To store intermediate perplexity differences recopy_model: Reset the model to the original pretrained GPT-2 weights after each iteration Returns: file stored intermediate perplexity differences in intermediate stages """ # initialize variables to record relevant information contexts = [] real_perps = [] past_perps = [] # Initialize the transformer model orig_model = copy.deepcopy(model) orig_model.to(device="cpu") torch.cuda.empty_cache() # Compute perplexity of initial transformer model for comparison model.train() model, lm_optimizer, lm_scheduler = recopy_model(orig_model, device, max_steps) for step in tqdm(range(max_steps)): context = torch.zeros((1, context_len), dtype=torch.long, device=device) story = random.choice(train_data) start = random.randint(0, len(story[0]) - context_len - 1) context[0, :] = story[0][start : start + context_len] lm_optimizer.zero_grad() outputs = model(context, labels=context) lm_loss = outputs[0] past_perp = compute_perplexity(model, context, context_len) model.train() lm_loss.backward() # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule # Compute perplexity after back-propagating on the selected context real_perp = compute_perplexity(model, objective_set, context_len) # Periodically save the stored (X, IG(X)) pairs if step % 1000 == 0 and step > 1: intermittent_save(contexts, real_perps, past_perps, filename) # Reset the pretrained model to the original pretrained GPT-2 weights after each iteration model, lm_optimizer, lm_scheduler = recopy_model(orig_model, device, max_steps) past_perps.append(past_perp.item()) real_perps.append(orig_perp - real_perp.item()) contexts.append(np.array(context.cpu())) intermittent_save(contexts, real_perps, past_perps, filename) def generate_datasets( context_len, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True ): """ Generate objective set and training set Args: context_len: The maximum total input sequence length after tokenization. 
Sequences longer than this will be truncated, sequences shorter will be padded file: Tokenized data split into training set and objective set number: size of objective dataset min_len: minimum length of a context in objective set trim: If True truncate the context if it exceeds context length Returns: Generated objective set and training data """ # Generate objective set and training set # Designate the first number (100) articles that are long enough to be used # as our objective set, rest (that are long enough) are training data for # secondary learner data = joblib.load(file) print("data loaded") objective_set = [] if trim: for i, example in enumerate(data): if len(example[0]) > min_len: start = random.randint(0, len(example[0]) - context_len - 1) objective_set.append(example[0, start : start + context_len]) if len(objective_set) >= number: break train_data = [] for j in range(i + 1, len(data)): if len(data[j][0]) > min_len: train_data.append(data[j]) else: objective_set = data[0:number] train_data = data[number:] joblib.dump(objective_set, "objective_set.jbl") print("objective set saved") return train_data, objective_set def train_secondary_learner( secondary_learner, train_dataset, max_epochs, batch_size, eval_freq=50, igf_model_path="secondary_learner.pt" ): """ Train the secondary learner (igf_model) Args: secondary_learner: secondary learner train_dataset: data to train secondary learner max_epochs: number of epochs to train secondary learner batch_size: batch size of training data of secondary learner eval_freq: secondary model evaluation can be triggered at eval_freq igf_model_path: path to store trained secondary learner Returns: Trained secondary learner """ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # We will use the first 512 pairs from our dataset as a test set for # our secondary learner and the rest to train test_dataset = train_dataset[:512] train_dataset = train_dataset[512:] train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size) test_dataloader = DataLoader(test_dataset, shuffle=False, batch_size=batch_size) # secondary learner model set up loss = nn.MSELoss() test_loss = nn.MSELoss(reduction="sum") secondary_learner.to(device) q_optimizer = torch.optim.Adam(secondary_learner.parameters(), lr=0.00001) secondary_learner.train() # TODO in original code this is written as number of actual batches seen # not number of items seen but other places it is number of items instead. # improve consistency! 
changed this to epochs for clarity best_test_loss = float("inf") # Iterate through batches until we've used max_steps batches for epoch in range(int(max_epochs)): tr_q_loss = 0.0 secondary_learner.train() for step, batch in enumerate(train_dataloader): context = batch[0].to(device) real_q = batch[1].to(device) predicted_q = secondary_learner(context) q_optimizer.zero_grad() q_loss = loss(predicted_q, real_q.float()) q_loss.backward() q_optimizer.step() tr_q_loss += q_loss.item() # model trains fairly quickly so we won't wait for a full epoch # eval is triggered at eval_freq and end of epochs if (step % eval_freq == 0 and step > 0) or ((step + 1) == len(train_dataloader)): tr_loss = tr_q_loss / (step + 1) secondary_learner.eval() q_loss2 = 0.0 sum_q2 = 0.0 predicted = [] actual = [] # Compute performance of the secondary learner after this batch for step2, batch2 in enumerate(test_dataloader): features2 = batch2[0].to(device) real_q2 = batch2[1].to(device) predicted_q2 = secondary_learner(features2) q_loss2 += test_loss(predicted_q2, real_q2).item() sum_q2 += torch.sum(predicted_q2).item() for ei, i in enumerate(predicted_q2.cpu().detach().numpy()): predicted.append(i.item()) for ei, i in enumerate(real_q2.cpu().detach().numpy()): actual.append(i.item()) q_loss2 /= len(test_dataset) print( "Epoch: ", epoch, "step: ", step, "Avg. q:", sum_q2 / len(test_dataset), "Train Loss: ", tr_loss, "Test Loss: ", q_loss2, ) if q_loss2 < best_test_loss: joblib.dump((predicted, actual), "pred_vs_actual.jbl") torch.save(secondary_learner.state_dict(), igf_model_path) best_test_loss = q_loss2 secondary_learner.train() return secondary_learner class SecondaryLearner(nn.Module): """ Our secondary learner """ def __init__(self, model): """ We use a simple convolutional network as our secondary learner Args: model: Pre-trained GPT2 model """ # embeddings are from the pretrained model super(SecondaryLearner, self).__init__() self.embeddings = model.transformer.wte self.embeddings.weight = copy.deepcopy(model.transformer.wte.weight) self.conv = nn.Conv1d(self.embeddings.weight.size(1), 256, 3, padding=1) self.fc = nn.Sequential(nn.Linear(256, 32), nn.Dropout(p=0.1), nn.Linear(32, 32), nn.Linear(32, 1)) def forward(self, context): """ Forward pass through the secondary learner Args: context: Context input to the secondary learner Returns: tensor after squeeze operation """ pooled = torch.max(self.conv(self.embeddings(context).squeeze(1).transpose(1, 2)), 2)[0] qs = self.fc(pooled) return qs.squeeze(1) @classmethod def from_pretrained(cls, state_path, model): """ Load the secondary learner Args: state_path: Path to save secondary learner model: Pretrained GPT-2 Returns: secondary learner """ secondary_learner = cls(model) # this calls __init__ state_dict = torch.load(state_path) secondary_learner.load_state_dict(state_dict) secondary_learner.embeddings = model.transformer.wte secondary_learner.embeddings.weight = copy.deepcopy(model.transformer.wte.weight) return secondary_learner
transformers/examples/research_projects/information-gain-filtration/igf/igf.py/0
{ "file_path": "transformers/examples/research_projects/information-gain-filtration/igf/igf.py", "repo_id": "transformers", "token_count": 6117 }
305
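In `igf.py` above, the training signal for the secondary learner is a normalized perplexity difference: a context's "information gain" is how much one gradient step on it lowers the model's perplexity on the objective set (`orig_perp - real_perp`), z-score normalized in `intermittent_save`. Below is a standalone sketch of just that target construction, with illustrative names; the real script interleaves it with back-propagation inside `collect_objective_set`:

```python
import numpy as np


def build_igf_targets(orig_perplexity: float, post_update_perplexities) -> np.ndarray:
    """Z-score normalized perplexity improvements, one target per candidate context."""
    gains = np.array([orig_perplexity - p for p in post_update_perplexities], dtype=np.float64)
    return (gains - gains.mean()) / gains.std()


# Contexts that reduce perplexity on the objective set the most get the highest targets.
print(build_igf_targets(30.0, [28.5, 29.9, 25.0]))  # roughly [-0.34, -1.02, 1.36]
```

The secondary learner is then fit with an MSE loss to predict these targets directly from token ids, so that at fine-tuning time it can score incoming batches without re-computing perplexities.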
import copy from transformers.configuration_utils import PretrainedConfig from transformers.utils import logging logger = logging.get_logger(__name__) class HybridCLIPConfig(PretrainedConfig): r""" :class:`HybridCLIPConfig` is the configuration class to store the configuration of a :class:`~HybridCLIPModel`. It is used to instantiate HybridCLIPModel model according to the specified arguments, defining the text model and vision model configs. Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information. Args: text_config_dict (:obj:`dict`): Dictionary of configuration options that defines text model config. vision_config_dict (:obj:`dict`): Dictionary of configuration options that defines vison model config. projection_dim (:obj:`int`, `optional`, defaults to 512): Dimentionality of text and vision projection layers. kwargs (`optional`): Dictionary of keyword arguments. Examples:: >>> from transformers import BertConfig, CLIPConfig, HybridCLIPConfig, FlaxHybridCLIP >>> # Initializing a BERT and CLIP configuration >>> config_text = BertConfig() >>> config_vision = CLIPConfig() >>> config = HybridCLIPConfig.from_text_vision_configs(config_text, config_vision, projection_dim=512) >>> # Initializing a BERT and CLIPVision model >>> model = EncoderDecoderModel(config=config) >>> # Accessing the model configuration >>> config_text = model.config.text_config >>> config_vision = model.config.vision_config >>> # Saving the model, including its configuration >>> model.save_pretrained('my-model') >>> # loading model and config from pretrained folder >>> encoder_decoder_config = HybridCLIPConfig.from_pretrained('my-model') >>> model = FlaxHybridCLIP.from_pretrained('my-model', config=encoder_decoder_config) """ model_type = "hybrid-clip" is_composition = True def __init__(self, projection_dim=512, **kwargs): super().__init__(**kwargs) if "text_config" not in kwargs: raise ValueError("`text_config` can not be `None`.") if "vision_config" not in kwargs: raise ValueError("`vision_config` can not be `None`.") text_config = kwargs.pop("text_config") vision_config = kwargs.pop("vision_config") text_model_type = text_config.pop("model_type") vision_model_type = vision_config.pop("model_type") from transformers import AutoConfig self.text_config = AutoConfig.for_model(text_model_type, **text_config) if vision_model_type == "clip": self.vision_config = AutoConfig.for_model(vision_model_type, **vision_config).vision_config elif vision_model_type == "clip_vision_model": from transformers import CLIPVisionConfig self.vision_config = CLIPVisionConfig(**vision_config) else: self.vision_config = AutoConfig.for_model(vision_model_type, **vision_config) self.projection_dim = projection_dim self.initializer_factor = 1.0 @classmethod def from_text_vision_configs(cls, text_config: PretrainedConfig, vision_config: PretrainedConfig, **kwargs): r""" Instantiate a :class:`HybridCLIPConfig` (or a derived class) from text model configuration and vision model configuration. Returns: :class:`HybridCLIPConfig`: An instance of a configuration object """ return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs) def to_dict(self): """ Serializes this instance to a Python dictionary. Override the default :meth:`~transformers.PretrainedConfig.to_dict`. 
Returns: :obj:`Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance. """ output = copy.deepcopy(self.__dict__) output["text_config"] = self.text_config.to_dict() output["vision_config"] = self.vision_config.to_dict() output["model_type"] = self.__class__.model_type return output
transformers/examples/research_projects/jax-projects/hybrid_clip/configuration_hybrid_clip.py/0
{ "file_path": "transformers/examples/research_projects/jax-projects/hybrid_clip/configuration_hybrid_clip.py", "repo_id": "transformers", "token_count": 1634 }
306
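The docstring of `HybridCLIPConfig` above already shows the intended usage; as a small concrete check of the composition logic (assuming the file is importable under its own name, `configuration_hybrid_clip`, and that a `transformers` version with CLIP support is installed):

```python
from transformers import BertConfig, CLIPVisionConfig

from configuration_hybrid_clip import HybridCLIPConfig  # the module shown above

config = HybridCLIPConfig.from_text_vision_configs(
    text_config=BertConfig(),          # any text model config: model_type is read from its dict
    vision_config=CLIPVisionConfig(),  # exercises the "clip_vision_model" branch of __init__
    projection_dim=512,
)

assert config.projection_dim == 512
assert config.to_dict()["model_type"] == "hybrid-clip"
print(type(config.text_config).__name__, type(config.vision_config).__name__)
# BertConfig CLIPVisionConfig
```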
# Token classification

## PyTorch version, no Trainer

Fine-tuning (m)LUKE for token classification tasks such as Named Entity Recognition (NER), Parts-of-speech tagging (POS) or phrase extraction (CHUNKS). You can easily customize it to your needs if you need extra processing on your datasets.

It will either run on a dataset hosted on our [hub](https://huggingface.co/datasets) or on your own text files for training and validation; you might just need to add some tweaks in the data preprocessing.

The script can be run in a distributed setup or on TPU, and supports mixed precision by means of the [🤗 `Accelerate`](https://github.com/huggingface/accelerate) library. You can use the script after installing 🤗 Accelerate:

```bash
pip install git+https://github.com/huggingface/accelerate
```

then to train English LUKE on CoNLL2003:

```bash
export TASK_NAME=ner

python run_luke_ner_no_trainer.py \
  --model_name_or_path studio-ousia/luke-base \
  --dataset_name conll2003 \
  --task_name $TASK_NAME \
  --max_length 128 \
  --per_device_train_batch_size 32 \
  --learning_rate 2e-5 \
  --num_train_epochs 3 \
  --output_dir /tmp/$TASK_NAME/
```

You can then use your usual launchers to run it in a distributed environment, but the easiest way is to run

```bash
accelerate config
```

and reply to the questions asked. Then

```bash
accelerate test
```

which will check that everything is ready for training. Finally, you can launch training with

```bash
export TASK_NAME=ner

accelerate launch run_luke_ner_no_trainer.py \
  --model_name_or_path studio-ousia/luke-base \
  --dataset_name conll2003 \
  --task_name $TASK_NAME \
  --max_length 128 \
  --per_device_train_batch_size 32 \
  --learning_rate 2e-5 \
  --num_train_epochs 3 \
  --output_dir /tmp/$TASK_NAME/
```

This command is the same and will work for:

- a CPU-only setup
- a setup with one GPU
- a distributed training with several GPUs (single or multi node)
- a training on TPUs

Note that this library is in alpha release, so your feedback is more than welcome if you encounter any problem using it.
transformers/examples/research_projects/luke/README.md/0
{ "file_path": "transformers/examples/research_projects/luke/README.md", "repo_id": "transformers", "token_count": 667 }
307
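The README above leans on 🤗 Accelerate so the same launch command works on CPU, a single GPU, several GPUs, or a TPU. The sketch below is not taken from `run_luke_ner_no_trainer.py`; it is a minimal, self-contained illustration of the Accelerate pattern such "no Trainer" scripts follow, with a toy linear model and random data standing in for the LUKE pipeline:

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator

accelerator = Accelerator()  # picks up whatever `accelerate config` selected (device, fp16, ...)

model = torch.nn.Linear(4, 2)  # placeholder for the token classification model
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
dataset = TensorDataset(torch.randn(32, 4), torch.randint(0, 2, (32,)))
dataloader = DataLoader(dataset, batch_size=8)

# prepare() wraps the objects for the current setup (DDP, TPU, mixed precision, ...)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for features, labels in dataloader:
    logits = model(features)
    loss = torch.nn.functional.cross_entropy(logits, labels)
    accelerator.backward(loss)  # replaces loss.backward() so distributed/AMP handling is applied
    optimizer.step()
    optimizer.zero_grad()
```

Run directly, this trains on CPU or a single GPU; launched with `accelerate launch`, the same file runs distributed without code changes.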
# coding=utf-8 # Copyright (c) Facebook, Inc. and its affiliates. # Copyright (c) HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Finetuning the library models for multimodal multiclass prediction on MM-IMDB dataset.""" import argparse import glob import json import logging import os import random import numpy as np import torch from sklearn.metrics import f1_score from torch import nn from torch.utils.data import DataLoader, RandomSampler, SequentialSampler from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange from utils_mmimdb import ImageEncoder, JsonlDataset, collate_fn, get_image_transforms, get_mmimdb_labels import transformers from transformers import ( WEIGHTS_NAME, AdamW, AutoConfig, AutoModel, AutoTokenizer, MMBTConfig, MMBTForClassification, get_linear_schedule_with_warmup, ) from transformers.trainer_utils import is_main_process try: from torch.utils.tensorboard import SummaryWriter except ImportError: from tensorboardX import SummaryWriter logger = logging.getLogger(__name__) def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def train(args, train_dataset, model, tokenizer, criterion): """Train the model""" if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader( train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, collate_fn=collate_fn, num_workers=args.num_workers, ) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay, }, {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total ) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = nn.parallel.DistributedDataParallel( model, 
device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True ) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info( " Total train batch size (w. parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), ) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 0 tr_loss, logging_loss = 0.0, 0.0 best_f1, n_no_improve = 0, 0 model.zero_grad() train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) set_seed(args) # Added here for reproducibility for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): model.train() batch = tuple(t.to(args.device) for t in batch) labels = batch[5] inputs = { "input_ids": batch[0], "input_modal": batch[2], "attention_mask": batch[1], "modal_start_tokens": batch[3], "modal_end_tokens": batch[4], } outputs = model(**inputs) logits = outputs[0] # model outputs are always tuple in transformers (see doc) loss = criterion(logits, labels) if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: logs = {} if ( args.local_rank == -1 and args.evaluate_during_training ): # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args, model, tokenizer, criterion) for key, value in results.items(): eval_key = "eval_{}".format(key) logs[eval_key] = value loss_scalar = (tr_loss - logging_loss) / args.logging_steps learning_rate_scalar = scheduler.get_lr()[0] logs["learning_rate"] = learning_rate_scalar logs["loss"] = loss_scalar logging_loss = tr_loss for key, value in logs.items(): tb_writer.add_scalar(key, value, global_step) print(json.dumps({**logs, **{"step": global_step}})) if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: # Save model checkpoint output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training torch.save(model_to_save.state_dict(), os.path.join(output_dir, WEIGHTS_NAME)) torch.save(args, os.path.join(output_dir, "training_args.bin")) logger.info("Saving model checkpoint to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: 
train_iterator.close() break if args.local_rank == -1: results = evaluate(args, model, tokenizer, criterion) if results["micro_f1"] > best_f1: best_f1 = results["micro_f1"] n_no_improve = 0 else: n_no_improve += 1 if n_no_improve > args.patience: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate(args, model, tokenizer, criterion, prefix=""): # Loop to handle MNLI double evaluation (matched, mis-matched) eval_output_dir = args.output_dir eval_dataset = load_examples(args, tokenizer, evaluate=True) if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: os.makedirs(eval_output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(eval_dataset) eval_dataloader = DataLoader( eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size, collate_fn=collate_fn ) # multi-gpu eval if args.n_gpu > 1 and not isinstance(model, nn.DataParallel): model = nn.DataParallel(model) # Eval! logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(eval_dataset)) logger.info(" Batch size = %d", args.eval_batch_size) eval_loss = 0.0 nb_eval_steps = 0 preds = None out_label_ids = None for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): batch = tuple(t.to(args.device) for t in batch) labels = batch[5] inputs = { "input_ids": batch[0], "input_modal": batch[2], "attention_mask": batch[1], "modal_start_tokens": batch[3], "modal_end_tokens": batch[4], } outputs = model(**inputs) logits = outputs[0] # model outputs are always tuple in transformers (see doc) tmp_eval_loss = criterion(logits, labels) eval_loss += tmp_eval_loss.mean().item() nb_eval_steps += 1 if preds is None: preds = torch.sigmoid(logits).detach().cpu().numpy() > 0.5 out_label_ids = labels.detach().cpu().numpy() else: preds = np.append(preds, torch.sigmoid(logits).detach().cpu().numpy() > 0.5, axis=0) out_label_ids = np.append(out_label_ids, labels.detach().cpu().numpy(), axis=0) eval_loss = eval_loss / nb_eval_steps result = { "loss": eval_loss, "macro_f1": f1_score(out_label_ids, preds, average="macro"), "micro_f1": f1_score(out_label_ids, preds, average="micro"), } output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results {} *****".format(prefix)) for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) return result def load_examples(args, tokenizer, evaluate=False): path = os.path.join(args.data_dir, "dev.jsonl" if evaluate else "train.jsonl") transforms = get_image_transforms() labels = get_mmimdb_labels() dataset = JsonlDataset(path, tokenizer, transforms, labels, args.max_seq_length - args.num_image_embeds - 2) return dataset def main(): parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--data_dir", default=None, type=str, required=True, help="The input data dir. 
Should contain the .jsonl files for MMIMDB.", ) parser.add_argument( "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", ) parser.add_argument( "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.", ) # Other parameters parser.add_argument( "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name" ) parser.add_argument( "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--cache_dir", default=None, type=str, help="Where do you want to store the pre-trained models downloaded from huggingface.co", ) parser.add_argument( "--max_seq_length", default=128, type=int, help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ), ) parser.add_argument( "--num_image_embeds", default=1, type=int, help="Number of Image Embeddings from the Image Encoder" ) parser.add_argument("--do_train", action="store_true", help="Whether to run training.") parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.") parser.add_argument( "--evaluate_during_training", action="store_true", help="Rul evaluation during training at each logging step." ) parser.add_argument( "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model." ) parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument( "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation." ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument( "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform." ) parser.add_argument("--patience", default=5, type=int, help="Patience for Early Stopping.") parser.add_argument( "--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. 
Override num_train_epochs.", ) parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument("--logging_steps", type=int, default=50, help="Log every X updates steps.") parser.add_argument("--save_steps", type=int, default=50, help="Save checkpoint every X updates steps.") parser.add_argument( "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", ) parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available") parser.add_argument("--num_workers", type=int, default=8, help="number of worker threads for dataloading") parser.add_argument( "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory" ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") parser.add_argument( "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", ) parser.add_argument( "--fp16_opt_level", type=str, default="O1", help=( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. " "See details at https://nvidia.github.io/apex/amp.html" ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.") parser.add_argument("--server_port", type=str, default="", help="For distant debugging.") args = parser.parse_args() if ( os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir ): raise ValueError( "Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format( args.output_dir ) ) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count() else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend="nccl") args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16, ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(args.local_rank): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set seed set_seed(args) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab # Setup model labels = get_mmimdb_labels() num_labels = len(labels) transformer_config = AutoConfig.from_pretrained(args.config_name if args.config_name else args.model_name_or_path) tokenizer = AutoTokenizer.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir, ) transformer = AutoModel.from_pretrained( args.model_name_or_path, config=transformer_config, cache_dir=args.cache_dir ) img_encoder = ImageEncoder(args) config = MMBTConfig(transformer_config, num_labels=num_labels) model = MMBTForClassification(config, transformer, img_encoder) if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Training if args.do_train: train_dataset = load_examples(args, tokenizer, evaluate=False) label_frequences = train_dataset.get_label_frequencies() label_frequences = [label_frequences[l] for l in labels] label_weights = ( torch.tensor(label_frequences, device=args.device, dtype=torch.float) / len(train_dataset) ) ** -1 criterion = nn.BCEWithLogitsLoss(pos_weight=label_weights) global_step, tr_loss = train(args, train_dataset, model, tokenizer, criterion) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained() if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training torch.save(model_to_save.state_dict(), os.path.join(args.output_dir, WEIGHTS_NAME)) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, "training_args.bin")) # Load a trained model and vocabulary that you have fine-tuned model = MMBTForClassification(config, transformer, img_encoder) model.load_state_dict(torch.load(os.path.join(args.output_dir, WEIGHTS_NAME))) tokenizer = AutoTokenizer.from_pretrained(args.output_dir) model.to(args.device) # Evaluation results = {} if args.do_eval and args.local_rank in [-1, 0]: checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = [ os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)) ] logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else "" prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else "" model = MMBTForClassification(config, transformer, img_encoder) model.load_state_dict(torch.load(checkpoint)) model.to(args.device) result = evaluate(args, model, tokenizer, criterion, prefix=prefix) result = {k + "_{}".format(global_step): v for k, v in result.items()} results.update(result) return results if __name__ == "__main__": main()
transformers/examples/research_projects/mm-imdb/run_mmimdb.py/0
{ "file_path": "transformers/examples/research_projects/mm-imdb/run_mmimdb.py", "repo_id": "transformers", "token_count": 10119 }
308
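The MM-IMDB script above up-weights rare labels by feeding inverse label frequencies into BCEWithLogitsLoss and then thresholds sigmoid outputs at 0.5 before computing micro/macro F1. Below is a minimal, self-contained sketch of those two steps; the label counts, dataset size, and logits are invented purely for illustration, and only PyTorch is assumed.

import torch
from torch import nn

# Hypothetical per-label positive counts and dataset size (illustration only).
label_counts = torch.tensor([120.0, 30.0, 6.0])
dataset_size = 150

# Inverse-frequency weighting: rarer labels receive a larger pos_weight.
pos_weight = (label_counts / dataset_size) ** -1  # -> tensor([ 1.25,  5.00, 25.00])
criterion = nn.BCEWithLogitsLoss(pos_weight=pos_weight)

logits = torch.tensor([[2.0, -1.0, 0.5]])   # fake model output for one example
targets = torch.tensor([[1.0, 0.0, 1.0]])   # multi-hot labels
loss = criterion(logits, targets)

# Multi-label predictions are obtained by thresholding the sigmoid at 0.5,
# the same rule evaluate() applies before computing F1.
preds = torch.sigmoid(logits) > 0.5
print(loss.item(), preds)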
import copy import itertools from typing import List, Optional, Tuple import torch import torch.nn.functional as F from transformers import BartConfig from transformers.generation import GenerationMixin def _convert_past_list_to_tuple(past_key_values): """ In Bart model, the type of past_key_values is tuple(tuple(torch.FloatTensor)) which is not TorchScript-compatible. To support this, we have to convert it during the export process. This function will convert past values from a list to tuple(tuple(torch.FloatTensor)) for the inner decoder. According to the definition of past_key_values, each inner tuple(torch.FloatTensor) has 4 tensors, so we convert every 4 elements in the list as a tuple(torch.FloatTensor). """ count_of_each_inner_tuple = 4 results = () temp_result = () count_n = len(past_key_values) // count_of_each_inner_tuple for idx in range(count_n): real_idx = idx * count_of_each_inner_tuple temp_result = tuple(past_key_values[real_idx : real_idx + count_of_each_inner_tuple]) results += ((temp_result),) return results class EncoderForONNX(torch.nn.Module): def __init__(self, encoder): super().__init__() self.encoder = encoder def forward(self, input_ids, attention_mask): return self.encoder( input_ids=input_ids, attention_mask=attention_mask, return_dict=False, ) class DecoderForONNX(torch.nn.Module): def __init__(self, decoder): super().__init__() self.decoder = decoder def forward(self, input_ids, encoder_state, attention_mask, past=None): all_results = None if past is not None: all_results = _convert_past_list_to_tuple(past) input_ids = input_ids[:, -1:] last_hidden_state, past_key_values = self.decoder( input_ids=input_ids, encoder_hidden_states=encoder_state, encoder_attention_mask=attention_mask, past_key_values=all_results, return_dict=False, ) past_values = [] for past in past_key_values: past_values = past_values + list(past) return last_hidden_state, past_values def _create_traced_encoder(encoder, input_ids, attention_mask): encoder_c = copy.deepcopy(encoder) encoder_for_onnx = EncoderForONNX(encoder_c) return torch.jit.trace(encoder_for_onnx, (input_ids, attention_mask)) def _create_traced_decoder(decoder, input_ids, encoder_state, attention_mask, past=None): decoder_c = copy.deepcopy(decoder) decoder_for_onnx = DecoderForONNX(decoder_c) past_values = list(itertools.chain.from_iterable(past or ())) # Do this twice so we got 2 different decoders for further work. if past_values: return torch.jit.trace(decoder_for_onnx, (input_ids, encoder_state, attention_mask, past_values)) else: return torch.jit.trace(decoder_for_onnx, (input_ids, encoder_state, attention_mask)) class BartConfigTS(BartConfig, torch.nn.Module): """ BartConfigTS is a TorchScript-compatible transformers.models.bart.configuration_bart.BartConfig. TorchScript only supports sub-classes of torch.nn.Module. """ def __init__(self, config): BartConfig.__init__(self, config) torch.nn.Module.__init__(self) class MinLengthLogitsProcessorTS(torch.nn.Module): r""" :class:`transformers.LogitsProcessor` enforcing a min-length by setting EOS probability to 0. Args: min_length (:obj:`int`): The minimum length below which the score of :obj:`eos_token_id` is set to :obj:`-float("Inf")`. eos_token_id (:obj:`int`): The id of the `end-of-sequence` token. 
""" def __init__(self, min_length: int, eos_token_id: int): super().__init__() if not isinstance(min_length, int) or min_length < 0: raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}") if not isinstance(eos_token_id, int) or eos_token_id < 0: raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}") self.min_length = min_length self.eos_token_id = eos_token_id def forward(self, input_ids, scores) -> torch.Tensor: cur_len = input_ids.shape[-1] if cur_len < self.min_length: scores[:, self.eos_token_id] = -float("inf") return scores class BARTGenerator(torch.nn.Module, GenerationMixin): def __init__(self, model): super().__init__() self.config = BartConfigTS(model.config) self.config.force_bos_token_to_be_generated = False self._trace_modules(model) self.logits_processor = MinLengthLogitsProcessorTS(self.config.min_length, self.config.eos_token_id) self.final_logits_weight = model.model.shared.weight self.final_logits_bias = model.final_logits_bias self.decoder_layers = model.config.decoder_layers def _trace_modules(self, model): input_ids = torch.tensor( [ [ 19, 669, 18, 420, 8, 664, 57, 42, 8, 664, 21, 3028, 195, 4445, 331, 1293, 34, 21, 10, 6174, 1100, 6, 69, 104, 42, 32, 2621, 1638, 144, 4, 6174, 558, 108, 4419, 1091, 28, 4, 1668, 9, 1509, 1621, 279, 35, 867, 2734, 85, 11, 2216, 2734, 85, 203, 2244, 7, 6, 15, 8102, 7, 57, 8629, 5, model.config.eos_token_id, ] ], device=model.device, dtype=torch.long, ) attention_mask = torch.tensor( [[True] * input_ids.shape[-1]], device=model.device, dtype=torch.bool, ) self.encoder = _create_traced_encoder(model.get_encoder(), input_ids, attention_mask) encoder_outputs = model.get_encoder()(input_ids, attention_mask=attention_mask, return_dict=True) decoder = model.model.decoder decoder_outputs = decoder(input_ids, attention_mask, encoder_outputs["last_hidden_state"], None, None, None) self.decoder_no_past = _create_traced_decoder( model.model.decoder, input_ids, encoder_outputs["last_hidden_state"], attention_mask ) self.decoder_with_past = _create_traced_decoder( model.model.decoder, input_ids, encoder_outputs["last_hidden_state"], attention_mask, decoder_outputs[1] ) def _encoder_forward(self, input_ids, attention_mask): return self.encoder(input_ids, attention_mask)[0] @staticmethod def _init_sequence_length_for_generation( input_ids: torch.LongTensor, max_length: int ) -> Tuple[torch.Tensor, torch.Tensor, int]: unfinished_sequences = torch.zeros(input_ids.shape[0], dtype=torch.long, device=input_ids.device) + 1 sequence_lengths = torch.zeros(input_ids.shape[0], dtype=torch.long, device=input_ids.device) + max_length cur_len = input_ids.shape[-1] return sequence_lengths, unfinished_sequences, cur_len def _decoder_forward(self, input_ids, encoder_output, attention_mask, past: List[torch.Tensor]): # Update here to use different decoder for different values of past. 
if past is None or len(past) == 0: decoder_output, past = self.decoder_no_past( input_ids=input_ids, encoder_state=encoder_output, attention_mask=attention_mask ) else: decoder_output, past = self.decoder_with_past( input_ids=input_ids, encoder_state=encoder_output, attention_mask=attention_mask, past=past ) lm_logits = F.linear(decoder_output, self.final_logits_weight, bias=self.final_logits_bias) return lm_logits, past def greedy_search( self, input_ids, encoder_output, attention_mask, max_length, pad_token_id: int, eos_token_id: int ): # init sequence length tensors sequence_lengths, unfinished_sequences, cur_len = self._init_sequence_length_for_generation( input_ids, max_length ) past: List[torch.Tensor] = [] while cur_len < max_length: logits, past = self._decoder_forward(input_ids, encoder_output, attention_mask, past) next_token_logits = logits[:, -1, :] # pre-process distribution scores = self.logits_processor(input_ids, next_token_logits) # argmax next_tokens = torch.argmax(scores, dim=-1) # add code that transfomers next_tokens to tokens_to_add if eos_token_id is not None: assert pad_token_id is not None, "If eos_token_id is defined, make sure that pad_token_id is defined." next_tokens = next_tokens * unfinished_sequences + (pad_token_id) * (1 - unfinished_sequences) # add token and increase length by one input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1) # update sequence length if eos_token_id is not None: sequence_lengths, unfinished_sequences = self._update_seq_length_for_generation( sequence_lengths, unfinished_sequences, cur_len, next_tokens == eos_token_id ) # stop when there is a </s> in each sentence, or if we exceed the maximul length if unfinished_sequences.max() == 0: break # increase cur_len cur_len = cur_len + 1 return input_ids def _prepare_decoder_input_ids_for_generation( self, input_ids: torch.LongTensor, decoder_start_token_id, bos_token_id: Optional[int] = None, ) -> torch.LongTensor: decoder_input_ids = ( torch.ones((input_ids.shape[0], 1), dtype=input_ids.dtype, device=input_ids.device) * decoder_start_token_id ) return decoder_input_ids def forward(self, input_ids, attention_mask, max_length, decoder_start_token_id): pad_token_id = self.config.pad_token_id bos_token_id = self.config.bos_token_id eos_token_id = self.config.eos_token_id # special case if pad_token_id is not defined if pad_token_id is None and eos_token_id is not None: # Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation. 
pad_token_id = eos_token_id encoder_output = self._encoder_forward(input_ids, attention_mask) input_ids = self._prepare_decoder_input_ids_for_generation( input_ids, decoder_start_token_id=decoder_start_token_id, bos_token_id=bos_token_id, ) return self.greedy_search( input_ids, encoder_output, attention_mask, max_length=max_length, pad_token_id=pad_token_id, eos_token_id=eos_token_id, ) # TorchScript compatible BeamSearchScorer class BeamSearchScorerTS(torch.nn.Module): def __init__(self): super().__init__() self.max_length: int = 200 self.num_beams: int = 3 self.batch_size: int = 1 self.length_penalty: float = 1.0 self.do_early_stopping: bool = True self.num_beam_hyps_to_keep: int = 1 self.num_beam_groups: int = 1 self.group_size: int = self.num_beams // self.num_beam_groups self._done = torch.zeros(self.batch_size, dtype=torch.bool) self._beam_hyps_count = torch.zeros(self.batch_size, dtype=torch.long) self._beam_hyps_worst_scores = torch.zeros(self.batch_size) + 1e9 self._beam_hyps_max_length: int = self.max_length - 1 self._beam_hyps: List[torch.Tensor] = [torch.zeros(2)] # placeholder for TorchScript compatibility self._beam_scores: List[torch.Tensor] = [torch.zeros(2)] # placeholder for TorchScript compatibility def is_done(self) -> torch.Tensor: return self._done.all() def init( self, batch_size: int, max_length: int, num_beams: int, device: torch.device, length_penalty: float = 1.0, do_early_stopping: bool = False, num_beam_hyps_to_keep: int = 1, num_beam_groups: int = 1, ): self.max_length = max_length self.num_beams = num_beams self.batch_size = batch_size self.length_penalty = length_penalty self.do_early_stopping = do_early_stopping self.num_beam_hyps_to_keep = num_beam_hyps_to_keep self.num_beam_groups = num_beam_groups self.group_size = self.num_beams // self.num_beam_groups # NOTE: TorchScript does not support List of Modules # Rewritten BeamHypotheses with tensors and list of tensors. self._done = torch.zeros(batch_size, dtype=torch.bool, device=device) self._beam_hyps_count = torch.zeros(batch_size, dtype=torch.long, device=device) self._beam_hyps_worst_scores = torch.zeros(batch_size, device=device) + 1e9 self._beam_hyps = [] self._beam_scores = [] self._beam_hyps_max_length = max_length - 1 # ignoring bos_token if not isinstance(num_beams, int) or num_beams <= 1: raise ValueError( f"`num_beams` has to be an integer strictly greater than 1, but is {num_beams}. For `num_beams` == 1," " one should make use of `greedy_search` instead." ) if not isinstance(num_beam_groups, int) or (num_beam_groups > num_beams) or (num_beams % num_beam_groups != 0): raise ValueError( "`num_beam_groups` has to be an integer smaller or equal than `num_beams` and `num_beams` has to be" f" divisible by `num_beam_groups`, but is {num_beam_groups} with `num_beams` being {num_beams}." ) def hypo_len(self, hypo_idx: int): """ Number of hypotheses in the list. """ return self._beam_hyps_count[hypo_idx] def hypo_add(self, hyp: torch.Tensor, sum_logprobs: float, hypo_idx: int): """ Add a new hypothesis to the list. """ score = sum_logprobs / (hyp.shape[-1] ** self.length_penalty) hyps_count = self.hypo_len(hypo_idx) if hyps_count < self.num_beams or score > self._beam_hyps_worst_scores[hypo_idx]: # NOTE: work around difference of torch.sum(empty_tensor) == 0, while error in onnx. 
# Bug: https://msdata.visualstudio.com/Vienna/_workitems/edit/1486599 beam_idx = ( torch.sum(self._beam_hyps_count[:hypo_idx]) if hypo_idx != 0 else torch.tensor(0, dtype=torch.long) ) self._beam_scores.insert(beam_idx, torch.tensor([score])) self._beam_hyps.insert(beam_idx, hyp) if hyps_count + 1 > self.num_beams: sorted_next_scores, sorted_indices = torch.topk( torch.cat(self._beam_scores)[beam_idx : beam_idx + hyps_count + 1], hyps_count + 1, largest=False ) del self._beam_hyps[int((sorted_indices[0] + beam_idx))] del self._beam_scores[int((sorted_indices[0] + beam_idx))] self._beam_hyps_worst_scores[hypo_idx] = sorted_next_scores[1] else: self._beam_hyps_worst_scores[hypo_idx] = min(score, self._beam_hyps_worst_scores[hypo_idx]) self._beam_hyps_count[hypo_idx] = hyps_count + 1 def hypo_is_done(self, hypo_idx: int, best_sum_logprobs: float, cur_len: int) -> bool: """ If there are enough hypotheses and that none of the hypotheses being generated can become better than the worst one in the heap, then we are done with this sentence. """ if self.hypo_len(hypo_idx) < self.num_beams: return False elif self.do_early_stopping: return True else: cur_score = best_sum_logprobs / cur_len**self.length_penalty ret = self._beam_hyps_worst_scores[hypo_idx].item() >= cur_score return ret def process( self, input_ids: torch.Tensor, next_scores: torch.Tensor, next_tokens: torch.Tensor, next_indices: torch.Tensor, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: cur_len = input_ids.shape[-1] batch_size = len(self._beam_hyps_count) assert batch_size == (input_ids.shape[0] // self.group_size) device = input_ids.device next_beam_scores = torch.zeros((batch_size, self.group_size), dtype=next_scores.dtype, device=device) next_beam_tokens = torch.zeros((batch_size, self.group_size), dtype=next_tokens.dtype, device=device) next_beam_indices = torch.zeros((batch_size, self.group_size), dtype=next_indices.dtype, device=device) for batch_idx in range(batch_size): if self._done[batch_idx]: assert ( self.hypo_len(batch_idx) >= self.num_beams ), "Batch can only be done if at least {} beams have been generated".format(self.num_beams) assert ( eos_token_id is not None and pad_token_id is not None ), "generated beams >= num_beams -> eos_token_id and pad_token have to be defined" # pad the batch next_beam_scores[batch_idx, :] = 0 next_beam_tokens[batch_idx, :] = pad_token_id next_beam_indices[batch_idx, :] = 0 continue # next tokens for this sentence beam_idx = 0 for beam_token_rank, (next_token, next_score, next_index) in enumerate( zip(next_tokens[batch_idx], next_scores[batch_idx], next_indices[batch_idx]) ): batch_beam_idx = batch_idx * self.group_size + next_index # add to generated hypotheses if end of sentence if (eos_token_id is not None) and (next_token == eos_token_id): # if beam_token does not belong to top num_beams tokens, it should not be added is_beam_token_worse_than_top_num_beams = beam_token_rank >= self.group_size if is_beam_token_worse_than_top_num_beams: continue self.hypo_add( input_ids[batch_beam_idx].clone(), next_score.item(), batch_idx, ) else: # add next predicted token since it is not eos_token next_beam_scores[batch_idx, beam_idx] = next_score next_beam_tokens[batch_idx, beam_idx] = next_token next_beam_indices[batch_idx, beam_idx] = batch_beam_idx beam_idx += 1 # once the beam for next step is full, don't add more tokens to it. 
if beam_idx == self.group_size: break if beam_idx < self.group_size: raise ValueError( f"At most {self.group_size} tokens in {next_tokens[batch_idx]} can be equal to `eos_token_id:" f" {eos_token_id}`. Make sure {next_tokens[batch_idx]} are corrected." ) # Check if we are done so that we can save a pad step if all(done) self._done[batch_idx] = self._done[batch_idx] or self.hypo_is_done( batch_idx, next_scores[batch_idx].max().item(), cur_len, ) return next_beam_scores.view(-1), next_beam_tokens.view(-1), next_beam_indices.view(-1) def finalize( self, input_ids: torch.Tensor, final_beam_scores: torch.Tensor, final_beam_tokens: torch.Tensor, final_beam_indices: torch.Tensor, pad_token_id: int, eos_token_id: int, ) -> Tuple[torch.Tensor, torch.Tensor]: batch_size = len(self._beam_hyps_count) # finalize all open beam hypotheses and add to generated hypotheses for batch_idx in range(batch_size): if self._done[batch_idx]: continue # all open beam hypotheses are added to the beam hypothesis # beam hypothesis class automatically keeps the best beams for beam_id in range(self.num_beams): batch_beam_idx = batch_idx * self.num_beams + beam_id final_score = final_beam_scores[batch_beam_idx].item() final_tokens = input_ids[batch_beam_idx] self.hypo_add(final_tokens, final_score, batch_idx) # select the best hypotheses # NOTE: torch.Tensor.new_zeros() is not scriptable sent_lengths = torch.zeros(batch_size * self.num_beam_hyps_to_keep, dtype=torch.long) best = [] best_scores = torch.zeros( batch_size * self.num_beam_hyps_to_keep, device=input_ids.device, dtype=torch.float32 ) # retrieve best hypotheses for i in range(batch_size): # NOTE: lambda is not scriptable batch_hypo_start = torch.sum(self._beam_hyps_count[:i]) if i > 0 else torch.tensor(0, dtype=torch.long) batch_hypo_end = torch.sum(self._beam_hyps_count[: i + 1]) beam_scores = torch.cat(self._beam_scores)[batch_hypo_start:batch_hypo_end] sorted_next_scores, sorted_indices = torch.topk(beam_scores, len(beam_scores), largest=True) for j in range(self.num_beam_hyps_to_keep): best_score = beam_scores[sorted_indices[j]] best_hyp = self._beam_hyps[batch_hypo_start + sorted_indices[j]] sent_lengths[self.num_beam_hyps_to_keep * i + j] = len(best_hyp) # append to lists best.append(best_hyp) best_scores[i * self.num_beam_hyps_to_keep + j] = best_score # prepare for adding eos sent_max_len = min(sent_lengths.max() + 1, self.max_length) decoded = torch.zeros(batch_size * self.num_beam_hyps_to_keep, sent_max_len, dtype=torch.long) # shorter batches are padded if needed if sent_lengths.min() != sent_lengths.max(): assert pad_token_id is not None, "`pad_token_id` has to be defined" decoded.fill_(pad_token_id) # fill with hypotheses and eos_token_id if the latter fits in for i, hypo in enumerate(best): decoded[i, : sent_lengths[i]] = hypo if sent_lengths[i] < self.max_length: decoded[i, sent_lengths[i]] = eos_token_id return decoded, best_scores class BARTBeamSearchGenerator(BARTGenerator): def __init__(self, model): super().__init__(model) self.beam_scorer = BeamSearchScorerTS() self.device = model.device @staticmethod def _expand_inputs_for_generation( input_ids: torch.Tensor, attention_mask: torch.Tensor, last_hidden_state: torch.Tensor, expand_size: int = 1, ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: expanded_return_idx = ( torch.arange(input_ids.shape[0]).view(-1, 1).repeat(1, expand_size).view(-1).to(input_ids.device) ) input_ids = input_ids.index_select(0, expanded_return_idx) attention_mask = attention_mask.index_select(0, 
expanded_return_idx) last_hidden_state = last_hidden_state.index_select(0, expanded_return_idx.to(last_hidden_state.device)) return input_ids, attention_mask, last_hidden_state def adjust_logits_during_generation(self, logits, cur_len: int, max_length: int): if cur_len == 1 and self.config.force_bos_token_to_be_generated: logits = self._force_token_id_to_be_generated(logits, self.config.bos_token_id) elif cur_len == max_length - 1 and self.config.eos_token_id is not None: logits = self._force_token_id_to_be_generated(logits, self.config.eos_token_id) return logits @staticmethod def _force_token_id_to_be_generated(scores, token_id: int): """force one of token_ids to be generated by setting prob of all other tokens to 0 (logprob=-float("inf"))""" mask = torch.full_like(scores, 1, dtype=torch.bool) mask[:, token_id] = False return scores.masked_fill(mask, -float("inf")) def _reorder_cache(self, past: List[torch.Tensor], beam_idx): # if decoder past is not included in output # speedy decoding is disabled and no need to reorder reordered_decoder_past = [] for state in past: reordered_decoder_past.append(state.index_select(0, beam_idx)) return reordered_decoder_past def beam_search( self, input_ids, encoder_output, attention_mask, num_beams, max_length, pad_token_id: int, eos_token_id: int ): batch_size = self.beam_scorer.batch_size num_beams = self.beam_scorer.num_beams batch_beam_size, cur_len = input_ids.shape assert ( num_beams * batch_size == batch_beam_size ), f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}." beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device) beam_scores[:, 1:] = -1e9 beam_scores = beam_scores.view((batch_size * num_beams,)) next_tokens = torch.zeros((batch_size, num_beams), dtype=torch.long, device=input_ids.device) next_indices = torch.zeros((batch_size, num_beams), dtype=torch.long, device=input_ids.device) past: List[torch.Tensor] = [] while cur_len < max_length: logits, past = self._decoder_forward(input_ids, encoder_output, attention_mask, past) next_token_logits = logits[:, -1, :] # adjust tokens for Bart, *e.g.* next_token_logits = self.adjust_logits_during_generation( next_token_logits, cur_len=cur_len, max_length=max_length ) next_token_scores = F.log_softmax(next_token_logits, dim=-1) # (batch_size * num_beams, vocab_size) # pre-process distribution next_token_scores = self.logits_processor(input_ids, next_token_scores) next_token_scores = next_token_scores + beam_scores[:, None].expand_as(next_token_scores) # reshape for beam search vocab_size = next_token_scores.shape[-1] next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size) next_token_scores, next_tokens = torch.topk( next_token_scores, 2 * num_beams, dim=1, largest=True, sorted=True ) next_indices = next_tokens // vocab_size next_tokens = next_tokens % vocab_size beam_scores, beam_next_tokens, beam_idx = self.beam_scorer.process( input_ids, next_token_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, ) input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1) cur_len = cur_len + 1 if len(past) > 0: past = self._reorder_cache(past, beam_idx) if self.beam_scorer.is_done(): break sequences, sequence_scores = self.beam_scorer.finalize( input_ids, beam_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, ) return sequences def forward(self, input_ids, attention_mask, num_beams, max_length, 
decoder_start_token_id): pad_token_id = self.config.pad_token_id bos_token_id = self.config.bos_token_id eos_token_id = self.config.eos_token_id # special case if pad_token_id is not defined if pad_token_id is None and eos_token_id is not None: # logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.") pad_token_id = eos_token_id encoder_output = self._encoder_forward(input_ids, attention_mask) input_ids = self._prepare_decoder_input_ids_for_generation( input_ids, decoder_start_token_id=decoder_start_token_id, bos_token_id=bos_token_id, ) batch_size = input_ids.shape[0] length_penalty = self.config.length_penalty num_return_sequences = self.config.num_return_sequences early_stopping = True self.beam_scorer.init( batch_size=batch_size, max_length=max_length, num_beams=num_beams, device=self.device, length_penalty=length_penalty, do_early_stopping=early_stopping, num_beam_hyps_to_keep=num_return_sequences, ) input_ids, attention_mask, encoder_output = self._expand_inputs_for_generation( input_ids, attention_mask, encoder_output, expand_size=num_beams, ) return self.beam_search( input_ids=input_ids, encoder_output=encoder_output, attention_mask=attention_mask, num_beams=num_beams, max_length=max_length, pad_token_id=pad_token_id, eos_token_id=eos_token_id, )
transformers/examples/research_projects/onnx/summarization/bart_onnx/generation_onnx.py/0
{ "file_path": "transformers/examples/research_projects/onnx/summarization/bart_onnx/generation_onnx.py", "repo_id": "transformers", "token_count": 15163 }
309
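The exported BART decoder above flattens past_key_values into a plain list for TorchScript and regroups it into per-layer tuples of four tensors via _convert_past_list_to_tuple. The short sketch below reproduces that regrouping on dummy tensors so the bookkeeping is easy to verify; the helper name group_past and the tensor shapes are illustrative, not part of the original module.

import torch

def group_past(flat_past, tensors_per_layer=4):
    # Group every `tensors_per_layer` consecutive tensors into one inner tuple,
    # one tuple per decoder layer.
    return tuple(
        tuple(flat_past[i : i + tensors_per_layer])
        for i in range(0, len(flat_past), tensors_per_layer)
    )

# Two decoder layers -> eight tensors in the flattened, TorchScript-friendly form.
flat = [torch.zeros(1, 2, 3, 4) for _ in range(8)]
grouped = group_past(flat)
assert len(grouped) == 2 and all(len(layer) == 4 for layer in grouped)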
#! /usr/bin/env python3 # coding=utf-8 # Copyright (c) 2019 Uber Technologies, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import csv import json import math import time import numpy as np import torch import torch.optim as optim import torch.utils.data as data from nltk.tokenize.treebank import TreebankWordDetokenizer from pplm_classification_head import ClassificationHead from torch import nn from torchtext import data as torchtext_data from torchtext import datasets from tqdm import tqdm, trange from transformers import GPT2LMHeadModel, GPT2Tokenizer torch.manual_seed(0) np.random.seed(0) EPSILON = 1e-10 example_sentence = "This is incredible! I love it, this is the best chicken I have ever had." max_length_seq = 100 class Discriminator(nn.Module): """Transformer encoder followed by a Classification Head""" def __init__(self, class_size, pretrained_model="openai-community/gpt2-medium", cached_mode=False, device="cpu"): super().__init__() self.tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model) self.encoder = GPT2LMHeadModel.from_pretrained(pretrained_model) self.embed_size = self.encoder.transformer.config.hidden_size self.classifier_head = ClassificationHead(class_size=class_size, embed_size=self.embed_size) self.cached_mode = cached_mode self.device = device def get_classifier(self): return self.classifier_head def train_custom(self): for param in self.encoder.parameters(): param.requires_grad = False self.classifier_head.train() def avg_representation(self, x): mask = x.ne(0).unsqueeze(2).repeat(1, 1, self.embed_size).float().to(self.device).detach() hidden = self.encoder.transformer(x)["last_hidden_state"] masked_hidden = hidden * mask avg_hidden = torch.sum(masked_hidden, dim=1) / (torch.sum(mask, dim=1).detach() + EPSILON) return avg_hidden def forward(self, x): if self.cached_mode: avg_hidden = x.to(self.device) else: avg_hidden = self.avg_representation(x.to(self.device)) logits = self.classifier_head(avg_hidden) probs = nn.functional.log_softmax(logits, dim=-1) return probs class Dataset(data.Dataset): def __init__(self, X, y): """Reads source and target sequences from txt files.""" self.X = X self.y = y def __len__(self): return len(self.X) def __getitem__(self, index): """Returns one data pair (source and target).""" data = {} data["X"] = self.X[index] data["y"] = self.y[index] return data def collate_fn(data): def pad_sequences(sequences): lengths = [len(seq) for seq in sequences] padded_sequences = torch.zeros(len(sequences), max(lengths)).long() # padding value = 0 for i, seq in enumerate(sequences): end = lengths[i] padded_sequences[i, :end] = seq[:end] return padded_sequences, lengths item_info = {} for key in data[0].keys(): item_info[key] = [d[key] for d in data] x_batch, _ = pad_sequences(item_info["X"]) y_batch = torch.tensor(item_info["y"], dtype=torch.long) return x_batch, y_batch def cached_collate_fn(data): item_info = {} for key in data[0].keys(): item_info[key] = [d[key] for d in data] x_batch = torch.cat(item_info["X"], 0) y_batch = 
torch.tensor(item_info["y"], dtype=torch.long) return x_batch, y_batch def train_epoch(data_loader, discriminator, optimizer, epoch=0, log_interval=10, device="cpu"): samples_so_far = 0 discriminator.train_custom() for batch_idx, (input_t, target_t) in enumerate(data_loader): input_t, target_t = input_t.to(device), target_t.to(device) optimizer.zero_grad() output_t = discriminator(input_t) loss = nn.functional.nll_loss(output_t, target_t) loss.backward(retain_graph=True) optimizer.step() samples_so_far += len(input_t) if batch_idx % log_interval == 0: print( "Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format( epoch + 1, samples_so_far, len(data_loader.dataset), 100 * samples_so_far / len(data_loader.dataset), loss.item(), ) ) def evaluate_performance(data_loader, discriminator, device="cpu"): discriminator.eval() test_loss = 0 correct = 0 with torch.no_grad(): for input_t, target_t in data_loader: input_t, target_t = input_t.to(device), target_t.to(device) output_t = discriminator(input_t) # sum up batch loss test_loss += nn.functional.nll_loss(output_t, target_t, reduction="sum").item() # get the index of the max log-probability pred_t = output_t.argmax(dim=1, keepdim=True) correct += pred_t.eq(target_t.view_as(pred_t)).sum().item() test_loss /= len(data_loader.dataset) print( "Performance on test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)".format( test_loss, correct, len(data_loader.dataset), 100.0 * correct / len(data_loader.dataset) ) ) def predict(input_sentence, model, classes, cached=False, device="cpu"): input_t = model.tokenizer.encode(input_sentence) input_t = torch.tensor([input_t], dtype=torch.long, device=device) if cached: input_t = model.avg_representation(input_t) log_probs = model(input_t).data.cpu().numpy().flatten().tolist() print("Input sentence:", input_sentence) print( "Predictions:", ", ".join("{}: {:.4f}".format(c, math.exp(log_prob)) for c, log_prob in zip(classes, log_probs)), ) def get_cached_data_loader(dataset, batch_size, discriminator, shuffle=False, device="cpu"): data_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=batch_size, collate_fn=collate_fn) xs = [] ys = [] for batch_idx, (x, y) in enumerate(tqdm(data_loader, ascii=True)): with torch.no_grad(): x = x.to(device) avg_rep = discriminator.avg_representation(x).cpu().detach() avg_rep_list = torch.unbind(avg_rep.unsqueeze(1)) xs += avg_rep_list ys += y.cpu().numpy().tolist() data_loader = torch.utils.data.DataLoader( dataset=Dataset(xs, ys), batch_size=batch_size, shuffle=shuffle, collate_fn=cached_collate_fn ) return data_loader def train_discriminator( dataset, dataset_fp=None, pretrained_model="openai-community/gpt2-medium", epochs=10, batch_size=64, log_interval=10, save_model=False, cached=False, no_cuda=False, ): device = "cuda" if torch.cuda.is_available() and not no_cuda else "cpu" print("Preprocessing {} dataset...".format(dataset)) start = time.time() if dataset == "SST": idx2class = ["positive", "negative", "very positive", "very negative", "neutral"] class2idx = {c: i for i, c in enumerate(idx2class)} discriminator = Discriminator( class_size=len(idx2class), pretrained_model=pretrained_model, cached_mode=cached, device=device ).to(device) text = torchtext_data.Field() label = torchtext_data.Field(sequential=False) train_data, val_data, test_data = datasets.SST.splits( text, label, fine_grained=True, train_subtrees=True, ) x = [] y = [] for i in trange(len(train_data), ascii=True): seq = TreebankWordDetokenizer().detokenize(vars(train_data[i])["text"]) seq = 
discriminator.tokenizer.encode(seq) seq = torch.tensor([50256] + seq, device=device, dtype=torch.long) x.append(seq) y.append(class2idx[vars(train_data[i])["label"]]) train_dataset = Dataset(x, y) test_x = [] test_y = [] for i in trange(len(test_data), ascii=True): seq = TreebankWordDetokenizer().detokenize(vars(test_data[i])["text"]) seq = discriminator.tokenizer.encode(seq) seq = torch.tensor([50256] + seq, device=device, dtype=torch.long) test_x.append(seq) test_y.append(class2idx[vars(test_data[i])["label"]]) test_dataset = Dataset(test_x, test_y) discriminator_meta = { "class_size": len(idx2class), "embed_size": discriminator.embed_size, "pretrained_model": pretrained_model, "class_vocab": class2idx, "default_class": 2, } elif dataset == "clickbait": idx2class = ["non_clickbait", "clickbait"] class2idx = {c: i for i, c in enumerate(idx2class)} discriminator = Discriminator( class_size=len(idx2class), pretrained_model=pretrained_model, cached_mode=cached, device=device ).to(device) with open("datasets/clickbait/clickbait_train_prefix.txt") as f: data = [] for i, line in enumerate(f): try: data.append(eval(line)) except Exception: print("Error evaluating line {}: {}".format(i, line)) continue x = [] y = [] with open("datasets/clickbait/clickbait_train_prefix.txt") as f: for i, line in enumerate(tqdm(f, ascii=True)): try: d = eval(line) seq = discriminator.tokenizer.encode(d["text"]) if len(seq) < max_length_seq: seq = torch.tensor([50256] + seq, device=device, dtype=torch.long) else: print("Line {} is longer than maximum length {}".format(i, max_length_seq)) continue x.append(seq) y.append(d["label"]) except Exception: print("Error evaluating / tokenizing line {}, skipping it".format(i)) pass full_dataset = Dataset(x, y) train_size = int(0.9 * len(full_dataset)) test_size = len(full_dataset) - train_size train_dataset, test_dataset = torch.utils.data.random_split(full_dataset, [train_size, test_size]) discriminator_meta = { "class_size": len(idx2class), "embed_size": discriminator.embed_size, "pretrained_model": pretrained_model, "class_vocab": class2idx, "default_class": 1, } elif dataset == "toxic": idx2class = ["non_toxic", "toxic"] class2idx = {c: i for i, c in enumerate(idx2class)} discriminator = Discriminator( class_size=len(idx2class), pretrained_model=pretrained_model, cached_mode=cached, device=device ).to(device) x = [] y = [] with open("datasets/toxic/toxic_train.txt") as f: for i, line in enumerate(tqdm(f, ascii=True)): try: d = eval(line) seq = discriminator.tokenizer.encode(d["text"]) if len(seq) < max_length_seq: seq = torch.tensor([50256] + seq, device=device, dtype=torch.long) else: print("Line {} is longer than maximum length {}".format(i, max_length_seq)) continue x.append(seq) y.append(int(np.sum(d["label"]) > 0)) except Exception: print("Error evaluating / tokenizing line {}, skipping it".format(i)) pass full_dataset = Dataset(x, y) train_size = int(0.9 * len(full_dataset)) test_size = len(full_dataset) - train_size train_dataset, test_dataset = torch.utils.data.random_split(full_dataset, [train_size, test_size]) discriminator_meta = { "class_size": len(idx2class), "embed_size": discriminator.embed_size, "pretrained_model": pretrained_model, "class_vocab": class2idx, "default_class": 0, } else: # if dataset == "generic": # This assumes the input dataset is a TSV with the following structure: # class \t text if dataset_fp is None: raise ValueError("When generic dataset is selected, dataset_fp needs to be specified aswell.") classes = set() with open(dataset_fp) as 
f: csv_reader = csv.reader(f, delimiter="\t") for row in tqdm(csv_reader, ascii=True): if row: classes.add(row[0]) idx2class = sorted(classes) class2idx = {c: i for i, c in enumerate(idx2class)} discriminator = Discriminator( class_size=len(idx2class), pretrained_model=pretrained_model, cached_mode=cached, device=device ).to(device) x = [] y = [] with open(dataset_fp) as f: csv_reader = csv.reader(f, delimiter="\t") for i, row in enumerate(tqdm(csv_reader, ascii=True)): if row: label = row[0] text = row[1] try: seq = discriminator.tokenizer.encode(text) if len(seq) < max_length_seq: seq = torch.tensor([50256] + seq, device=device, dtype=torch.long) else: print("Line {} is longer than maximum length {}".format(i, max_length_seq)) continue x.append(seq) y.append(class2idx[label]) except Exception: print("Error tokenizing line {}, skipping it".format(i)) pass full_dataset = Dataset(x, y) train_size = int(0.9 * len(full_dataset)) test_size = len(full_dataset) - train_size train_dataset, test_dataset = torch.utils.data.random_split(full_dataset, [train_size, test_size]) discriminator_meta = { "class_size": len(idx2class), "embed_size": discriminator.embed_size, "pretrained_model": pretrained_model, "class_vocab": class2idx, "default_class": 0, } end = time.time() print("Preprocessed {} data points".format(len(train_dataset) + len(test_dataset))) print("Data preprocessing took: {:.3f}s".format(end - start)) if cached: print("Building representation cache...") start = time.time() train_loader = get_cached_data_loader(train_dataset, batch_size, discriminator, shuffle=True, device=device) test_loader = get_cached_data_loader(test_dataset, batch_size, discriminator, device=device) end = time.time() print("Building representation cache took: {:.3f}s".format(end - start)) else: train_loader = torch.utils.data.DataLoader( dataset=train_dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn ) test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, collate_fn=collate_fn) if save_model: with open("{}_classifier_head_meta.json".format(dataset), "w") as meta_file: json.dump(discriminator_meta, meta_file) optimizer = optim.Adam(discriminator.parameters(), lr=0.0001) for epoch in range(epochs): start = time.time() print("\nEpoch", epoch + 1) train_epoch( discriminator=discriminator, data_loader=train_loader, optimizer=optimizer, epoch=epoch, log_interval=log_interval, device=device, ) evaluate_performance(data_loader=test_loader, discriminator=discriminator, device=device) end = time.time() print("Epoch took: {:.3f}s".format(end - start)) print("\nExample prediction") predict(example_sentence, discriminator, idx2class, cached=cached, device=device) if save_model: # torch.save(discriminator.state_dict(), # "{}_discriminator_{}.pt".format( # args.dataset, epoch + 1 # )) torch.save( discriminator.get_classifier().state_dict(), "{}_classifier_head_epoch_{}.pt".format(dataset, epoch + 1), ) if __name__ == "__main__": parser = argparse.ArgumentParser(description="Train a discriminator on top of GPT-2 representations") parser.add_argument( "--dataset", type=str, default="SST", choices=("SST", "clickbait", "toxic", "generic"), help=( "dataset to train the discriminator on. " "In case of generic, the dataset is expected " "to be a TSBV file with structure: class \\t text" ), ) parser.add_argument( "--dataset_fp", type=str, default="", help="File path of the dataset to use. 
Needed only in case of generic dataset", ) parser.add_argument( "--pretrained_model", type=str, default="openai-community/gpt2-medium", help="Pretrained model to use as encoder", ) parser.add_argument("--epochs", type=int, default=10, metavar="N", help="Number of training epochs") parser.add_argument( "--batch_size", type=int, default=64, metavar="N", help="input batch size for training (default: 64)" ) parser.add_argument( "--log_interval", type=int, default=10, metavar="N", help="how many batches to wait before logging training status", ) parser.add_argument("--save_model", action="store_true", help="whether to save the model") parser.add_argument("--cached", action="store_true", help="whether to cache the input representations") parser.add_argument("--no_cuda", action="store_true", help="use to turn off cuda") args = parser.parse_args() train_discriminator(**(vars(args)))
transformers/examples/research_projects/pplm/run_pplm_discrim_train.py/0
{ "file_path": "transformers/examples/research_projects/pplm/run_pplm_discrim_train.py", "repo_id": "transformers", "token_count": 8849 }
310
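Discriminator.avg_representation in the PPLM script above mean-pools GPT-2 hidden states while masking out padding (token id 0). The sketch below shows an equivalent masked mean on random tensors, using broadcasting instead of the script's explicit repeat; the token ids and hidden states are placeholders so it runs without downloading a model.

import torch

EPSILON = 1e-10
batch, seq_len, embed_size = 2, 5, 8

# First sequence is padded with two zeros; the second uses the full length.
input_ids = torch.tensor([[50256, 11, 22, 0, 0],
                          [50256, 33, 44, 55, 66]])
hidden = torch.randn(batch, seq_len, embed_size)   # stand-in for GPT-2 hidden states

mask = input_ids.ne(0).unsqueeze(2).float()        # (batch, seq_len, 1); 0 at pad positions
masked_hidden = hidden * mask                      # zero out padded positions
avg_hidden = masked_hidden.sum(dim=1) / (mask.sum(dim=1) + EPSILON)
print(avg_hidden.shape)                            # torch.Size([2, 8]): one pooled vector per sequence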
import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version logger = logging.getLogger(__name__) require_version("pytorch_lightning>=1.0.4") MODEL_MODES = { "base": AutoModel, "sequence-classification": AutoModelForSequenceClassification, "question-answering": AutoModelForQuestionAnswering, "pretraining": AutoModelForPreTraining, "token-classification": AutoModelForTokenClassification, "language-modeling": AutoModelWithLMHead, "summarization": AutoModelForSeq2SeqLM, "translation": AutoModelForSeq2SeqLM, } # update this and the import above to support new schedulers from transformers.optimization arg_to_scheduler = { "linear": get_linear_schedule_with_warmup, "cosine": get_cosine_schedule_with_warmup, "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup, "polynomial": get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } arg_to_scheduler_choices = sorted(arg_to_scheduler.keys()) arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}" class BaseTransformer(pl.LightningModule): def __init__( self, hparams: argparse.Namespace, num_labels=None, mode="base", config=None, tokenizer=None, model=None, **config_kwargs, ): """Initialize a model, tokenizer and config.""" super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(hparams) self.step_count = 0 self.output_dir = Path(self.hparams.output_dir) cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: self.config = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path, **({"num_labels": num_labels} if num_labels is not None else {}), cache_dir=cache_dir, **config_kwargs, ) else: self.config: PretrainedConfig = config extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout") for p in extra_model_params: if getattr(self.hparams, p, None): assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute" setattr(self.config, p, getattr(self.hparams, p)) if tokenizer is None: self.tokenizer = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path, cache_dir=cache_dir, ) else: self.tokenizer: PreTrainedTokenizer = tokenizer self.model_type = MODEL_MODES[mode] if model is None: self.model = self.model_type.from_pretrained( self.hparams.model_name_or_path, from_tf=bool(".ckpt" in self.hparams.model_name_or_path), config=self.config, cache_dir=cache_dir, ) else: self.model = model def load_hf_checkpoint(self, *args, **kwargs): self.model = self.model_type.from_pretrained(*args, **kwargs) def 
get_lr_scheduler(self): get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler] scheduler = get_schedule_func( self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps() ) scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1} return scheduler def configure_optimizers(self): """Prepare optimizer and schedule (linear warmup and decay)""" model = self.model no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay) ], # check this named paramters "weight_decay": self.hparams.weight_decay, }, { "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] if self.hparams.adafactor: optimizer = Adafactor( optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False ) else: optimizer = AdamW( optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon ) self.opt = optimizer scheduler = self.get_lr_scheduler() return [optimizer], [scheduler] def test_step(self, batch, batch_nb): return self.validation_step(batch, batch_nb) def test_epoch_end(self, outputs): return self.validation_end(outputs) def total_steps(self) -> int: """The number of total training steps that will be run. Used for lr scheduler purposes.""" num_devices = max(1, self.hparams.gpus) # TODO: consider num_tpu_cores effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def setup(self, stage): if stage == "test": self.dataset_size = len(self.test_dataloader().dataset) else: self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True) self.dataset_size = len(self.train_dataloader().dataset) def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False): raise NotImplementedError("You must implement this for your task") def train_dataloader(self): return self.train_loader def val_dataloader(self): return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False) def test_dataloader(self): return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False) def _feature_file(self, mode): return os.path.join( self.hparams.data_dir, "cached_{}_{}_{}".format( mode, list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(), str(self.hparams.max_seq_length), ), ) @pl.utilities.rank_zero_only def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None: save_path = self.output_dir.joinpath("best_tfmr") self.model.config.save_step = self.step_count self.model.save_pretrained(save_path) self.tokenizer.save_pretrained(save_path) @staticmethod def add_model_specific_args(parser, root_dir): parser.add_argument( "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", ) parser.add_argument( "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name" ) parser.add_argument( "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--cache_dir", default=str(Path(__file__).parent / "test_run" / "cache"), type=str, help="Where do you want to store the pre-trained models downloaded from huggingface.co", ) parser.add_argument( 
"--encoder_layerdrop", type=float, help="Encoder layer dropout probability (Optional). Goes into model.config", ) parser.add_argument( "--decoder_layerdrop", type=float, help="Decoder layer dropout probability (Optional). Goes into model.config", ) parser.add_argument( "--dropout", type=float, help="Dropout probability (Optional). Goes into model.config", ) parser.add_argument( "--attention_dropout", type=float, help="Attention dropout probability (Optional). Goes into model.config", ) parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument( "--lr_scheduler", default="linear", choices=arg_to_scheduler_choices, metavar=arg_to_scheduler_metavar, type=str, help="Learning rate scheduler", ) parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader") parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int) parser.add_argument("--train_batch_size", default=32, type=int) parser.add_argument("--eval_batch_size", default=32, type=int) parser.add_argument("--adafactor", action="store_true") class InitCallback(pl.Callback): # this process can also be done with PL ddp plugging. # But still it is experimental (check original RAG, I updated that with pluggin (shamanez)) def on_sanity_check_start(self, trainer, pl_module): if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. 
class CheckParamCallback(pl.Callback): # check whether new added model paramters are differentiable def on_after_backward(self, trainer, pl_module): # print(pl_module.model.rag) for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(name) class LoggingCallback(pl.Callback): def on_batch_end(self, trainer, pl_module): lr_scheduler = trainer.lr_schedulers[0]["scheduler"] lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())} pl_module.logger.log_metrics(lrs) def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule): rank_zero_info("***** Validation results *****") metrics = trainer.callback_metrics # Log results for key in sorted(metrics): if key not in ["log", "progress_bar"]: rank_zero_info("{} = {}\n".format(key, str(metrics[key]))) def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule): rank_zero_info("***** Test results *****") metrics = trainer.callback_metrics # Log and save results to file output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt") with open(output_test_results_file, "w") as writer: for key in sorted(metrics): if key not in ["log", "progress_bar"]: rank_zero_info("{} = {}\n".format(key, str(metrics[key]))) writer.write("{} = {}\n".format(key, str(metrics[key]))) def add_generic_args(parser, root_dir) -> None: # To allow all pl args uncomment the following line # parser = pl.Trainer.add_argparse_args(parser) parser.add_argument( "--output_dir", default=str(Path(__file__).parent / "test_run" / "model_checkpoints"), type=str, help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument( "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", ) parser.add_argument( "--fp16_opt_level", type=str, default="O2", help=( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. " "See details at https://nvidia.github.io/apex/amp.html" ), ) parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int) parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm") parser.add_argument("--do_train", action="store_true", help="Whether to run training.") parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.") parser.add_argument( "--gradient_accumulation_steps", dest="accumulate_grad_batches", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") parser.add_argument( "--data_dir", default=str(Path(__file__).parent / "test_run" / "dummy-train-data"), type=str, help="The input data dir. 
Should contain the training files for the CoNLL-2003 NER task.", ) def generic_train( model: BaseTransformer, args: argparse.Namespace, early_stopping_callback=None, logger=True, # can pass WandbLogger() here extra_callbacks=[], checkpoint_callback=None, logging_callback=None, **extra_train_kwargs, ): pl.seed_everything(args.seed) # init model odir = Path(model.hparams.output_dir) odir.mkdir(exist_ok=True) # add custom checkpoints if checkpoint_callback is None: checkpoint_callback = pl.callbacks.ModelCheckpoint( filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(early_stopping_callback) if logging_callback is None: logging_callback = LoggingCallback() train_params = {} if args.fp16: train_params["precision"] = 16 if args.gpus > 1: train_params["accelerator"] = "auto" train_params["strategy"] = "ddp" train_params["accumulate_grad_batches"] = args.accumulate_grad_batches train_params["profiler"] = None train_params["devices"] = "auto" trainer = pl.Trainer.from_argparse_args( args, weights_summary=None, callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback], logger=logger, val_check_interval=1, num_sanity_val_steps=2, **train_params, ) if args.do_train: trainer.fit(model) else: print("RAG modeling tests with new set functions successfully executed!") return trainer
transformers/examples/research_projects/rag-end2end-retriever/lightning_base.py/0
{ "file_path": "transformers/examples/research_projects/rag-end2end-retriever/lightning_base.py", "repo_id": "transformers", "token_count": 7006 }
311
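A detail worth calling out in `add_generic_args` above is that several CLI flags are stored under the attribute names pytorch-lightning's `Trainer` expects, via `dest=`. Below is a minimal sketch (not part of the original file), assuming the file is importable as `lightning_base`, of how the renamed flags surface on the parsed namespace:

```python
# Minimal sketch: the dest= renames in add_generic_args map CLI flags onto the
# attribute names that generic_train() and pl.Trainer read.
import argparse

from lightning_base import add_generic_args

parser = argparse.ArgumentParser()
add_generic_args(parser, root_dir=".")
args = parser.parse_args(
    ["--do_train", "--max_grad_norm", "0.5", "--gradient_accumulation_steps", "4"]
)

print(args.gradient_clip_val)        # 0.5  -> came in as --max_grad_norm
print(args.accumulate_grad_batches)  # 4    -> came in as --gradient_accumulation_steps
print(args.do_train)                 # True
```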
import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def count_trainable_parameters(model): model_parameters = filter(lambda p: p.requires_grad, model.parameters()) params = sum([np.prod(p.size()) for p in model_parameters]) return params logger = logging.getLogger(__name__) def get_checkpoint_callback(output_dir, metric): """Saves the best model by validation EM score.""" if metric == "rouge2": exp = "{val_avg_rouge2:.4f}-{step_count}" elif metric == "bleu": exp = "{val_avg_bleu:.4f}-{step_count}" elif metric == "em": exp = "{val_avg_em:.4f}-{step_count}" else: raise NotImplementedError( f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this" " function." ) checkpoint_callback = ModelCheckpoint( dirpath=output_dir, filename=exp, monitor=f"val_{metric}", mode="max", save_top_k=3, every_n_epochs=1, # maybe save a checkpoint every time val is run, not just end of epoch. ) return checkpoint_callback def get_early_stopping_callback(metric, patience): return EarlyStopping( monitor=f"val_{metric}", # does this need avg? mode="min" if "loss" in metric else "max", patience=patience, verbose=True, ) class Seq2SeqLoggingCallback(pl.Callback): def on_batch_end(self, trainer, pl_module): lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)} pl_module.logger.log_metrics(lrs) @rank_zero_only def _write_logs( self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True ) -> None: logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****") metrics = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]}) # Log results od = Path(pl_module.hparams.output_dir) if type_path == "test": results_file = od / "test_results.txt" generations_file = od / "test_generations.txt" else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt" generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt" results_file.parent.mkdir(exist_ok=True) generations_file.parent.mkdir(exist_ok=True) with open(results_file, "a+") as writer: for key in sorted(metrics): if key in ["log", "progress_bar", "preds"]: continue val = metrics[key] if isinstance(val, torch.Tensor): val = val.item() msg = f"{key}: {val:.6f}\n" writer.write(msg) if not save_generations: return if "preds" in metrics: content = "\n".join(metrics["preds"]) generations_file.open("w+").write(content) @rank_zero_only def on_train_start(self, trainer, pl_module): try: npars = pl_module.model.model.num_parameters() except AttributeError: npars = pl_module.model.num_parameters() n_trainable_pars = count_trainable_parameters(pl_module) # mp stands for million parameters trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6}) @rank_zero_only def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule): save_json(pl_module.metrics, pl_module.metrics_save_path) return self._write_logs(trainer, pl_module, "test") @rank_zero_only def on_validation_end(self, trainer: pl.Trainer, pl_module): save_json(pl_module.metrics, pl_module.metrics_save_path) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
transformers/examples/research_projects/rag/callbacks_rag.py/0
{ "file_path": "transformers/examples/research_projects/rag/callbacks_rag.py", "repo_id": "transformers", "token_count": 1946 }
312
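The two factory functions above return standard pytorch-lightning callbacks, so they can be handed straight to a `Trainer`. A hedged sketch follows; the output directory, metric choice and patience are illustrative rather than taken from the original finetuning script:

```python
# Illustrative wiring of the callback factories defined in callbacks_rag.py.
import pytorch_lightning as pl

from callbacks_rag import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback

output_dir = "outputs/rag-finetune"  # assumption: any writable directory
checkpoint_cb = get_checkpoint_callback(output_dir, metric="em")      # keeps the top-3 checkpoints by val_em
early_stop_cb = get_early_stopping_callback(metric="em", patience=4)  # stops when val_em stops improving

trainer = pl.Trainer(
    max_epochs=3,
    callbacks=[Seq2SeqLoggingCallback(), checkpoint_cb, early_stop_cb],
)
# trainer.fit(model)  # `model` would be the RAG LightningModule built by finetune_rag.py
```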
#!/usr/bin/env python3 import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def log_results(result: Dataset, args: Dict[str, str]): """DO NOT CHANGE. This function computes and logs the result metrics.""" log_outputs = args.log_outputs dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split]) # load metric wer = load_metric("wer") cer = load_metric("cer") # compute metrics wer_result = wer.compute(references=result["target"], predictions=result["prediction"]) cer_result = cer.compute(references=result["target"], predictions=result["prediction"]) # print & log results result_str = f"WER: {wer_result}\nCER: {cer_result}" print(result_str) with open(f"{dataset_id}_eval_results.txt", "w") as f: f.write(result_str) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: pred_file = f"log_{dataset_id}_predictions.txt" target_file = f"log_{dataset_id}_targets.txt" with open(pred_file, "w") as p, open(target_file, "w") as t: # mapping function to write output def write_to_file(batch, i): p.write(f"{i}" + "\n") p.write(batch["prediction"] + "\n") t.write(f"{i}" + "\n") t.write(batch["target"] + "\n") result.map(write_to_file, with_indices=True) def normalize_text(text: str) -> str: """DO ADAPT FOR YOUR USE CASE. this function normalizes the target text.""" chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training text = re.sub(chars_to_ignore_regex, "", text.lower()) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! token_sequences_to_ignore = ["\n\n", "\n", " ", " "] for t in token_sequences_to_ignore: text = " ".join(text.split(t)) return text def main(args): # load dataset dataset = load_dataset(args.dataset, args.config, split=args.split, token=True) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id) sampling_rate = feature_extractor.sampling_rate # resample audio dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate)) # load eval pipeline if args.device is None: args.device = 0 if torch.cuda.is_available() else -1 asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device) # map function to decode audio def map_to_pred(batch): prediction = asr( batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s ) batch["prediction"] = prediction["text"] batch["target"] = normalize_text(batch["sentence"]) return batch # run inference on all examples result = dataset.map(map_to_pred, remove_columns=dataset.column_names) # compute and log_results # do not change function below log_results(result, args) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers" ) parser.add_argument( "--dataset", type=str, required=True, help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets", ) parser.add_argument( "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice" ) parser.add_argument("--split", type=str, required=True, help="Split of the dataset. 
*E.g.* `'test'`") parser.add_argument( "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds." ) parser.add_argument( "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second." ) parser.add_argument( "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis." ) parser.add_argument( "--device", type=int, default=None, help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.", ) args = parser.parse_args() main(args)
transformers/examples/research_projects/robust-speech-event/eval.py/0
{ "file_path": "transformers/examples/research_projects/robust-speech-event/eval.py", "repo_id": "transformers", "token_count": 1852 }
313
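The core of the evaluation script is the chunked pipeline call inside `map_to_pred`. Here is a minimal sketch of that step on a single local audio file; the checkpoint and file name are placeholders, and `ffmpeg` is assumed to be available for decoding:

```python
# Sketch of the per-example inference step performed by eval.py, applied to one local file.
import torch
from transformers import pipeline

device = 0 if torch.cuda.is_available() else -1  # same auto-selection as in main()
asr = pipeline(
    "automatic-speech-recognition",
    model="facebook/wav2vec2-base-960h",  # placeholder: substitute the checkpoint you want to evaluate
    device=device,
)

# Long recordings are transcribed in overlapping windows, mirroring --chunk_length_s / --stride_length_s.
prediction = asr("sample.wav", chunk_length_s=5.0, stride_length_s=1.0)
print(prediction["text"])
```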
#!/usr/bin/env bash export PYTHONPATH="../":"${PYTHONPATH}" export WANDB_PROJECT=dmar export MAX_LEN=128 python finetune.py \ --learning_rate=3e-4 \ --do_train \ --do_predict \ --fp16 \ --val_check_interval 0.25 \ --data_dir $ENRO_DIR \ --max_source_length $MAX_LEN --max_target_length $MAX_LEN --val_max_target_length $MAX_LEN --test_max_target_length $MAX_LEN \ --freeze_encoder --freeze_embeds \ --train_batch_size=$BS --eval_batch_size=$BS \ --tokenizer_name $m --model_name_or_path $m \ --warmup_steps 500 --sortish_sampler --logger_name wandb \ --gpus 1 --fp16_opt_level=O1 --task translation --num_sanity_val_steps=0 \ "$@"
transformers/examples/research_projects/seq2seq-distillation/distil_marian_no_teacher.sh/0
{ "file_path": "transformers/examples/research_projects/seq2seq-distillation/distil_marian_no_teacher.sh", "repo_id": "transformers", "token_count": 274 }
314
#!/usr/bin/env bash export PYTHONPATH="../":"${PYTHONPATH}" python finetune.py \ --learning_rate=3e-5 \ --fp16 \ --do_train \ --val_check_interval=0.25 \ --adam_eps 1e-06 \ --num_train_epochs 6 --src_lang en_XX --tgt_lang ro_RO \ --data_dir $ENRO_DIR \ --max_source_length $MAX_LEN --max_target_length $MAX_LEN --val_max_target_length $MAX_LEN --test_max_target_length $MAX_LEN \ --train_batch_size=$BS --eval_batch_size=$BS \ --task translation \ --warmup_steps 500 \ --freeze_embeds \ --model_name_or_path=facebook/mbart-large-cc25 \ "$@"
transformers/examples/research_projects/seq2seq-distillation/train_mbart_cc25_enro.sh/0
{ "file_path": "transformers/examples/research_projects/seq2seq-distillation/train_mbart_cc25_enro.sh", "repo_id": "transformers", "token_count": 273 }
315
# Simple VQGAN CLIP Author: @ErwannMillon This is a very simple VQGAN-CLIP implementation that was built as a part of the <a href= "https://github.com/ErwannMillon/face-editor"> Face Editor project </a> . This simplified version allows you to generate or edit images using text with just three lines of code. For a more full featured implementation with masking, more advanced losses, and a full GUI, check out the Face Editor project. By default this uses a CelebA checkpoint (for generating/editing faces), but also has an imagenet checkpoint that can be loaded by specifying vqgan_config and vqgan_checkpoint when instantiating VQGAN_CLIP. Learning rate and iterations can be set by modifying vqgan_clip.lr and vqgan_clip.iterations . You can edit images by passing `image_path` to the generate function. See the generate function's docstring to learn more about how to format prompts. ## Usage The easiest way to test this out is by <a href="https://colab.research.google.com/drive/1Ez4D1J6-hVkmlXeR5jBPWYyu6CLA9Yor?usp=sharing ">using the Colab demo</a> To install locally: - Clone this repo - Install git-lfs (ubuntu: sudo apt-get install git-lfs , MacOS: brew install git-lfs) In the root of the repo run: ```bash conda create -n vqganclip python=3.8 conda activate vqganclip git-lfs install git clone https://huggingface.co/datasets/erwann/face_editor_model_ckpt model_checkpoints pip install -r requirements.txt ``` ### Generate new images ```python from VQGAN_CLIP import VQGAN_CLIP vqgan_clip = VQGAN_CLIP() vqgan_clip.generate("a picture of a smiling woman") ``` ### Edit an image To get a test image, run `git clone https://huggingface.co/datasets/erwann/vqgan-clip-pic test_images` To edit: ```python from VQGAN_CLIP import VQGAN_CLIP vqgan_clip = VQGAN_CLIP() vqgan_clip.lr = .07 vqgan_clip.iterations = 15 vqgan_clip.generate( pos_prompts= ["a picture of a beautiful asian woman", "a picture of a woman from Japan"], neg_prompts=["a picture of an Indian person", "a picture of a white person"], image_path="./test_images/face.jpeg", show_intermediate=True, save_intermediate=True, ) ``` ### Make an animation from the most recent generation `vqgan_clip.make_animation()` ## Features: - Positive and negative prompts - Multiple prompts - Prompt Weights - Creating GIF animations of the transformations - Wandb logging
transformers/examples/research_projects/vqgan-clip/README.md/0
{ "file_path": "transformers/examples/research_projects/vqgan-clip/README.md", "repo_id": "transformers", "token_count": 777 }
316
#!/usr/bin/env bash python run_common_voice.py \ --model_name_or_path="facebook/wav2vec2-large-xlsr-53" \ --dataset_config_name="tr" \ --output_dir=./wav2vec2-large-xlsr-turkish-demo \ --overwrite_output_dir \ --num_train_epochs="5" \ --per_device_train_batch_size="16" \ --evaluation_strategy="steps" \ --learning_rate="3e-4" \ --warmup_steps="500" \ --fp16 \ --freeze_feature_extractor \ --save_steps="400" \ --eval_steps="400" \ --save_total_limit="3" \ --logging_steps="400" \ --group_by_length \ --feat_proj_dropout="0.0" \ --layerdrop="0.1" \ --gradient_checkpointing \ --do_train --do_eval
transformers/examples/research_projects/wav2vec2/finetune_wav2vec2_xlsr_turkish.sh/0
{ "file_path": "transformers/examples/research_projects/wav2vec2/finetune_wav2vec2_xlsr_turkish.sh", "repo_id": "transformers", "token_count": 316 }
317
<!---
Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# 🤗 Benchmark results

Here, you can find a list of the different benchmark results created by the community.

If you would like to list benchmark results on your favorite models of the [model hub](https://huggingface.co/models) here, please open a Pull Request and add it below.

| Benchmark description | Results | Environment info | Author |
|:----------|:-------------|:-------------|------:|
| PyTorch Benchmark on inference for `google-bert/bert-base-cased` | [memory](https://github.com/patrickvonplaten/files_to_link_to/blob/master/bert_benchmark/inference_memory.csv) | [env](https://github.com/patrickvonplaten/files_to_link_to/blob/master/bert_benchmark/env.csv) | [Patrick von Platen](https://github.com/patrickvonplaten) |
| PyTorch Benchmark on inference for `google-bert/bert-base-cased` | [time](https://github.com/patrickvonplaten/files_to_link_to/blob/master/bert_benchmark/inference_time.csv) | [env](https://github.com/patrickvonplaten/files_to_link_to/blob/master/bert_benchmark/env.csv) | [Patrick von Platen](https://github.com/patrickvonplaten) |
transformers/examples/tensorflow/benchmarking/README.md/0
{ "file_path": "transformers/examples/tensorflow/benchmarking/README.md", "repo_id": "transformers", "token_count": 501 }
318
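For reference, result files like the ones linked in the table can be produced with the (legacy) benchmark utilities bundled with `transformers`. A hedged sketch follows; the CSV file names are chosen to mirror the links above, and the exact argument set may differ between library versions:

```python
# Hedged sketch: producing inference time/memory CSVs plus an environment-info CSV.
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["google-bert/bert-base-cased"],
    batch_sizes=[8],
    sequence_lengths=[8, 32, 128, 512],
    save_to_csv=True,
    inference_time_csv_file="inference_time.csv",
    inference_memory_csv_file="inference_memory.csv",
    env_info_csv_file="env.csv",
)
results = PyTorchBenchmark(args).run()
```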
#!/usr/bin/env python # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Finetuning the library models for sequence classification on GLUE.""" # You can also adapt this script on your own text classification task. Pointers for this are left as comments. import json import logging import os import sys import warnings from dataclasses import dataclass, field from typing import Optional import evaluate import numpy as np import tensorflow as tf from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoTokenizer, DataCollatorWithPadding, DefaultDataCollator, HfArgumentParser, PretrainedConfig, PushToHubCallback, TFAutoModelForSequenceClassification, TFTrainingArguments, create_optimizer, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process from transformers.utils import check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.40.0.dev0") task_to_keys = { "cola": ("sentence", None), "mnli": ("premise", "hypothesis"), "mrpc": ("sentence1", "sentence2"), "qnli": ("question", "sentence"), "qqp": ("question1", "question2"), "rte": ("sentence1", "sentence2"), "sst2": ("sentence", None), "stsb": ("sentence1", "sentence2"), "wnli": ("sentence1", "sentence2"), } logger = logging.getLogger(__name__) # region Command-line arguments @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command line. """ task_name: str = field( metadata={"help": "The name of the task to train on: " + ", ".join(task_to_keys.keys())}, ) predict_file: str = field( metadata={"help": "A file containing user-supplied examples to make predictions for"}, default=None, ) max_seq_length: int = field( default=128, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} ) pad_to_max_length: bool = field( default=False, metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) }, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." 
) }, ) max_predict_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." ) }, ) def __post_init__(self): self.task_name = self.task_name.lower() if self.task_name not in task_to_keys.keys(): raise ValueError("Unknown task, you should pick one in " + ",".join(task_to_keys.keys())) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) token: str = field( default=None, metadata={ "help": ( "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token " "generated when running `huggingface-cli login` (stored in `~/.huggingface`)." ) }, ) use_auth_token: bool = field( default=None, metadata={ "help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token` instead." }, ) trust_remote_code: bool = field( default=False, metadata={ "help": ( "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option " "should only be set to `True` for repositories you trust and in which you have read the code, as it will " "execute code present on the Hub on your local machine." ) }, ) # endregion def main(): # region Argument parsing # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() if model_args.use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token` instead.", FutureWarning, ) if model_args.token is not None: raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") model_args.token = model_args.use_auth_token # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. 
send_example_telemetry("run_glue", model_args, data_args, framework="tensorflow") if not (training_args.do_train or training_args.do_eval or training_args.do_predict): exit("Must specify at least one of --do_train, --do_eval or --do_predict!") # endregion # region Checkpoints checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: checkpoint = get_last_checkpoint(training_args.output_dir) if checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # endregion # region Logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info(f"Training/evaluation parameters {training_args}") # endregion # region Dataset and labels # Set seed before initializing model. set_seed(training_args.seed) # Downloading and loading a dataset from the hub. In distributed training, the load_dataset function guarantee # that only one local process can concurrently download the dataset. datasets = load_dataset( "nyu-mll/glue", data_args.task_name, cache_dir=model_args.cache_dir, token=model_args.token, ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets. is_regression = data_args.task_name == "stsb" if not is_regression: label_list = datasets["train"].features["label"].names num_labels = len(label_list) else: num_labels = 1 if data_args.predict_file is not None: logger.info("Preparing user-supplied file for predictions...") data_files = {"data": data_args.predict_file} for key in data_files.keys(): logger.info(f"Loading a local file for {key}: {data_files[key]}") if data_args.predict_file.endswith(".csv"): # Loading a dataset from local csv files user_dataset = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir) else: # Loading a dataset from local json files user_dataset = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir) needed_keys = task_to_keys[data_args.task_name] for key in needed_keys: assert key in user_dataset["data"].features, f"Your supplied predict_file is missing the {key} key!" datasets["user_data"] = user_dataset["data"] # endregion # region Load model config and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) # endregion # region Dataset preprocessing sentence1_key, sentence2_key = task_to_keys[data_args.task_name] # Padding strategy if data_args.pad_to_max_length: padding = "max_length" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch padding = False # Some models have set the order of the labels to use, so let's make sure we do use it. label_to_id = None if config.label2id != PretrainedConfig(num_labels=num_labels).label2id and not is_regression: # Some have all caps in their config, some don't. label_name_to_id = {k.lower(): v for k, v in config.label2id.items()} if sorted(label_name_to_id.keys()) == sorted(label_list): label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)} else: logger.warning( "Your model seems to have been trained with labels, but they don't match the dataset: ", f"model labels: {sorted(label_name_to_id.keys())}, dataset labels: {sorted(label_list)}." "\nIgnoring the model labels as a result.", ) label_to_id = {label: i for i, label in enumerate(label_list)} if label_to_id is not None: config.label2id = label_to_id config.id2label = {id: label for label, id in config.label2id.items()} elif data_args.task_name is not None and not is_regression: config.label2id = {l: i for i, l in enumerate(label_list)} config.id2label = {id: label for label, id in config.label2id.items()} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the " f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." 
) max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) def preprocess_function(examples): # Tokenize the texts args = ( (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key]) ) result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True) return result datasets = datasets.map(preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache) if data_args.pad_to_max_length: data_collator = DefaultDataCollator(return_tensors="np") else: data_collator = DataCollatorWithPadding(tokenizer, return_tensors="np") # endregion # region Metric function metric = evaluate.load("glue", data_args.task_name, cache_dir=model_args.cache_dir) def compute_metrics(preds, label_ids): preds = preds["logits"] preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1) result = metric.compute(predictions=preds, references=label_ids) if len(result) > 1: result["combined_score"] = np.mean(list(result.values())).item() return result # endregion with training_args.strategy.scope(): # region Load pretrained model if checkpoint is None: model_path = model_args.model_name_or_path else: model_path = checkpoint model = TFAutoModelForSequenceClassification.from_pretrained( model_path, config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) # endregion # region Convert data to a tf.data.Dataset dataset_options = tf.data.Options() dataset_options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF num_replicas = training_args.strategy.num_replicas_in_sync tf_data = {} max_samples = { "train": data_args.max_train_samples, "validation": data_args.max_eval_samples, "validation_matched": data_args.max_eval_samples, "validation_mismatched": data_args.max_eval_samples, "test": data_args.max_predict_samples, "test_matched": data_args.max_predict_samples, "test_mismatched": data_args.max_predict_samples, "user_data": None, } for key in datasets.keys(): if key == "train" or key.startswith("validation"): assert "label" in datasets[key].features, f"Missing labels from {key} data!" if key == "train": shuffle = True batch_size = training_args.per_device_train_batch_size * num_replicas else: shuffle = False batch_size = training_args.per_device_eval_batch_size * num_replicas samples_limit = max_samples[key] dataset = datasets[key] if samples_limit is not None: dataset = dataset.select(range(samples_limit)) # model.prepare_tf_dataset() wraps a Hugging Face dataset in a tf.data.Dataset which is ready to use in # training. This is the recommended way to use a Hugging Face dataset when training with Keras. 
You can also # use the lower-level dataset.to_tf_dataset() method, but you will have to specify things like column names # yourself if you use this method, whereas they are automatically inferred from the model input names when # using model.prepare_tf_dataset() # For more info see the docs: # https://huggingface.co/docs/transformers/main/en/main_classes/model#transformers.TFPreTrainedModel.prepare_tf_dataset # https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.to_tf_dataset data = model.prepare_tf_dataset( dataset, shuffle=shuffle, batch_size=batch_size, collate_fn=data_collator, tokenizer=tokenizer, ) data = data.with_options(dataset_options) tf_data[key] = data # endregion # region Optimizer, loss and compilation if training_args.do_train: num_train_steps = len(tf_data["train"]) * training_args.num_train_epochs if training_args.warmup_steps > 0: num_warmup_steps = training_args.warmup_steps elif training_args.warmup_ratio > 0: num_warmup_steps = int(num_train_steps * training_args.warmup_ratio) else: num_warmup_steps = 0 optimizer, schedule = create_optimizer( init_lr=training_args.learning_rate, num_train_steps=num_train_steps, num_warmup_steps=num_warmup_steps, adam_beta1=training_args.adam_beta1, adam_beta2=training_args.adam_beta2, adam_epsilon=training_args.adam_epsilon, weight_decay_rate=training_args.weight_decay, adam_global_clipnorm=training_args.max_grad_norm, ) else: optimizer = "sgd" # Just write anything because we won't be using it if is_regression: metrics = [] else: metrics = ["accuracy"] # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). model.compile(optimizer=optimizer, metrics=metrics, jit_compile=training_args.xla) # endregion # region Preparing push_to_hub and model card push_to_hub_model_id = training_args.push_to_hub_model_id model_name = model_args.model_name_or_path.split("/")[-1] if not push_to_hub_model_id: push_to_hub_model_id = f"{model_name}-finetuned-glue" model_card_kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"} model_card_kwargs["task_name"] = data_args.task_name if training_args.push_to_hub: callbacks = [ PushToHubCallback( output_dir=training_args.output_dir, hub_model_id=push_to_hub_model_id, hub_token=training_args.push_to_hub_token, tokenizer=tokenizer, **model_card_kwargs, ) ] else: callbacks = [] # endregion # region Training and validation if training_args.do_train: if training_args.do_eval and not data_args.task_name == "mnli": # Do both evaluation and training in the Keras fit loop, unless the task is MNLI # because MNLI has two validation sets validation_data = tf_data["validation"] else: validation_data = None model.fit( tf_data["train"], validation_data=validation_data, epochs=int(training_args.num_train_epochs), callbacks=callbacks, ) # endregion # region Evaluation if training_args.do_eval: # We normally do validation as part of the Keras fit loop, but we run it independently # if there was no fit() step (because we didn't train the model) or if the task is MNLI, # because MNLI has a separate validation-mismatched validation set # In this example, we compute advanced metrics only at the end of training, and only compute # loss and accuracy on the validation set each epoch, but # if you'd like to compute metrics every epoch that are too complex to be written as # standard Keras metrics, you can use our KerasMetricCallback. 
See # https://huggingface.co/docs/transformers/main/en/main_classes/keras_callbacks logger.info("*** Evaluate ***") # Loop to handle MNLI double evaluation (matched, mis-matched) if data_args.task_name == "mnli": tasks = ["mnli", "mnli-mm"] tf_datasets = [tf_data["validation_matched"], tf_data["validation_mismatched"]] raw_datasets = [datasets["validation_matched"], datasets["validation_mismatched"]] else: tasks = [data_args.task_name] tf_datasets = [tf_data["validation"]] raw_datasets = [datasets["validation"]] for raw_dataset, tf_dataset, task in zip(raw_datasets, tf_datasets, tasks): eval_predictions = model.predict(tf_dataset) eval_metrics = compute_metrics(eval_predictions, raw_dataset["label"]) print(f"Evaluation metrics ({task}):") print(eval_metrics) if training_args.output_dir is not None: output_eval_file = os.path.join(training_args.output_dir, "all_results.json") with open(output_eval_file, "w") as writer: writer.write(json.dumps(eval_metrics)) # endregion # region Prediction if training_args.do_predict or data_args.predict_file: logger.info("*** Predict ***") # Loop to handle MNLI double evaluation (matched, mis-matched) tasks = [] tf_datasets = [] raw_datasets = [] if training_args.do_predict: if data_args.task_name == "mnli": tasks.extend(["mnli", "mnli-mm"]) tf_datasets.extend([tf_data["test_matched"], tf_data["test_mismatched"]]) raw_datasets.extend([datasets["test_matched"], datasets["test_mismatched"]]) else: tasks.append(data_args.task_name) tf_datasets.append(tf_data["test"]) raw_datasets.append(datasets["test"]) if data_args.predict_file: tasks.append("user_data") tf_datasets.append(tf_data["user_data"]) raw_datasets.append(datasets["user_data"]) for raw_dataset, tf_dataset, task in zip(raw_datasets, tf_datasets, tasks): test_predictions = model.predict(tf_dataset) if "label" in raw_dataset: test_metrics = compute_metrics(test_predictions, raw_dataset["label"]) print(f"Test metrics ({task}):") print(test_metrics) if is_regression: predictions_to_write = np.squeeze(test_predictions["logits"]) else: predictions_to_write = np.argmax(test_predictions["logits"], axis=1) output_predict_file = os.path.join(training_args.output_dir, f"predict_results_{task}.txt") with open(output_predict_file, "w") as writer: logger.info(f"***** Writing prediction results for {task} *****") writer.write("index\tprediction\n") for index, item in enumerate(predictions_to_write): if is_regression: writer.write(f"{index}\t{item:3.3f}\n") else: item = model.config.id2label[item] writer.write(f"{index}\t{item}\n") # endregion if training_args.output_dir is not None and not training_args.push_to_hub: # If we're not pushing to hub, at least save a local copy when we're done model.save_pretrained(training_args.output_dir) if __name__ == "__main__": main()
transformers/examples/tensorflow/text-classification/run_glue.py/0
{ "file_path": "transformers/examples/tensorflow/text-classification/run_glue.py", "repo_id": "transformers", "token_count": 11377 }
319
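When `--predict_file` is supplied, the script checks that the file provides the columns listed in `task_to_keys` for the chosen task. A small illustrative sketch of building such a file for MRPC; the file name and sentence pairs are made up:

```python
# Build a user-supplied prediction file for --task_name mrpc (needs "sentence1" and "sentence2").
import csv

rows = [
    {"sentence1": "The company said profits rose.", "sentence2": "Profits increased, the company said."},
    {"sentence1": "It rained all day.", "sentence2": "The sky was clear the whole afternoon."},
]
with open("mrpc_user_data.csv", "w", newline="") as f:
    writer = csv.DictWriter(f, fieldnames=["sentence1", "sentence2"])
    writer.writeheader()
    writer.writerows(rows)

# Then, for example:
#   python run_glue.py --model_name_or_path <checkpoint> --task_name mrpc \
#       --predict_file mrpc_user_data.csv --do_predict --output_dir out/
```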
#!/usr/bin/env bash # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script acquires data and converts it to fsmt model # it covers: # - allenai/wmt19-de-en-6-6-base # - allenai/wmt19-de-en-6-6-big # this script needs to be run from the top level of the transformers repo if [ ! -d "src/transformers" ]; then echo "Error: This script needs to be run from the top of the transformers repo" exit 1 fi mkdir data # get data (run once) cd data gdown 'https://drive.google.com/uc?id=1j6z9fYdlUyOYsh7KJoumRlr1yHczxR5T' gdown 'https://drive.google.com/uc?id=1yT7ZjqfvUYOBXvMjeY8uGRHQFWoSo8Q5' gdown 'https://drive.google.com/uc?id=15gAzHeRUCs-QV8vHeTReMPEh1j8excNE' tar -xvzf wmt19.de-en.tar.gz tar -xvzf wmt19_deen_base_dr0.1_1.tar.gz tar -xvzf wmt19_deen_big_dr0.1_2.tar.gz cp wmt19.de-en/data-bin/dict.*.txt wmt19_deen_base_dr0.1_1 cp wmt19.de-en/data-bin/dict.*.txt wmt19_deen_big_dr0.1_2 cd - # run conversions and uploads PYTHONPATH="src" python src/transformers/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py --fsmt_checkpoint_path data/wmt19_deen_base_dr0.1_1/checkpoint_last3_avg.pt --pytorch_dump_folder_path data/wmt19-de-en-6-6-base PYTHONPATH="src" python src/transformers/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py --fsmt_checkpoint_path data/wmt19_deen_big_dr0.1_2/checkpoint_last3_avg.pt --pytorch_dump_folder_path data/wmt19-de-en-6-6-big # upload cd data transformers-cli upload -y wmt19-de-en-6-6-base transformers-cli upload -y wmt19-de-en-6-6-big cd - # if updating just small files and not the large models, here is a script to generate the right commands: perl -le 'for $f (@ARGV) { print qq[transformers-cli upload -y $_/$f --filename $_/$f] for ("wmt19-de-en-6-6-base", "wmt19-de-en-6-6-big")}' vocab-src.json vocab-tgt.json tokenizer_config.json config.json # add/remove files as needed
transformers/scripts/fsmt/convert-allenai-wmt19.sh/0
{ "file_path": "transformers/scripts/fsmt/convert-allenai-wmt19.sh", "repo_id": "transformers", "token_count": 950 }
320
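Once converted and uploaded, the checkpoints can be sanity-checked with the FSMT classes. A hedged sketch using one of the model ids produced by this script (a local path such as `data/wmt19-de-en-6-6-base` works the same way):

```python
# Quick translation smoke test for a converted FSMT checkpoint.
from transformers import FSMTForConditionalGeneration, FSMTTokenizer

mname = "allenai/wmt19-de-en-6-6-base"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)

inputs = tokenizer("Maschinelles Lernen ist großartig!", return_tensors="pt")
generated = model.generate(**inputs, num_beams=5)
print(tokenizer.decode(generated[0], skip_special_tokens=True))
```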
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Simple check list from AllenNLP repo: https://github.com/allenai/allennlp/blob/main/setup.py To create the package for pypi. 1. Create the release branch named: v<RELEASE>-release, for example v4.19-release. For a patch release checkout the current release branch. If releasing on a special branch, copy the updated README.md on the main branch for your the commit you will make for the post-release and run `make fix-copies` on the main branch as well. 2. Run `make pre-release` (or `make pre-patch` for a patch release) and commit these changes with the message: "Release: <VERSION>" and push. 3. Go back to the main branch and run `make post-release` then `make fix-copies`. Commit these changes with the message "v<NEXT_VERSION>.dev.0" and push to main. # If you were just cutting the branch in preparation for a release, you can stop here for now. 4. Wait for the tests on the release branch to be completed and be green (otherwise revert and fix bugs) 5. On the release branch, add a tag in git to mark the release: "git tag v<VERSION> -m 'Adds tag v<VERSION> for pypi' " Push the tag to git: git push --tags origin v<RELEASE>-release 6. Build both the sources and the wheel. Do not change anything in setup.py between creating the wheel and the source distribution (obviously). Run `make build-release`. This will build the release and do some sanity checks for you. If this ends with an error message, you need to fix things before going further. You should now have a /dist directory with both .whl and .tar.gz source versions. 7. Check that everything looks correct by uploading the package to the pypi test server: twine upload dist/* -r testpypi (pypi suggest using twine as other methods upload files via plaintext.) You may have to specify the repository url, use the following command then: twine upload dist/* -r testpypi --repository-url=https://test.pypi.org/legacy/ Check that you can install it in a virtualenv by running: pip install -i https://testpypi.python.org/pypi transformers Check you can run the following commands: python -c "from transformers import pipeline; classifier = pipeline('text-classification'); print(classifier('What a nice release'))" python -c "from transformers import *" python utils/check_build.py --check_lib If making a patch release, double check the bug you are patching is indeed resolved. 8. Upload the final version to actual pypi: twine upload dist/* -r pypi 9. Copy the release notes from RELEASE.md to the tag in github once everything is looking hunky-dory. 
""" import os import re import shutil from pathlib import Path from setuptools import Command, find_packages, setup # Remove stale transformers.egg-info directory to avoid https://github.com/pypa/pip/issues/5466 stale_egg_info = Path(__file__).parent / "transformers.egg-info" if stale_egg_info.exists(): print( ( "Warning: {} exists.\n\n" "If you recently updated transformers to 3.0 or later, this is expected,\n" "but it may prevent transformers from installing in editable mode.\n\n" "This directory is automatically generated by Python's packaging tools.\n" "I will remove it now.\n\n" "See https://github.com/pypa/pip/issues/5466 for details.\n" ).format(stale_egg_info) ) shutil.rmtree(stale_egg_info) # IMPORTANT: # 1. all dependencies should be listed here with their version requirements if any # 2. once modified, run: `make deps_table_update` to update src/transformers/dependency_versions_table.py _deps = [ "Pillow>=10.0.1,<=15.0", "accelerate>=0.21.0", "av==9.2.0", # Latest version of PyAV (10.0.0) has issues with audio stream. "beautifulsoup4", "codecarbon==1.2.0", "cookiecutter==1.7.3", "dataclasses", "datasets!=2.5.0", "decord==0.6.0", "deepspeed>=0.9.3", "diffusers", "dill<0.3.5", "evaluate>=0.2.0", "faiss-cpu", "fastapi", "filelock", "flax>=0.4.1,<=0.7.0", "fsspec<2023.10.0", "ftfy", "fugashi>=1.0", "GitPython<3.1.19", "hf-doc-builder>=0.3.0", "huggingface-hub>=0.19.3,<1.0", "importlib_metadata", "ipadic>=1.0.0,<2.0", "isort>=5.5.4", "jax>=0.4.1,<=0.4.13", "jaxlib>=0.4.1,<=0.4.13", "jieba", "kenlm", # Keras pin - this is to make sure Keras 3 doesn't destroy us. Remove or change when we have proper support. "keras<2.16", "keras-nlp>=0.3.1", "librosa", "nltk", "natten>=0.14.6,<0.15.0", "numpy>=1.17", "onnxconverter-common", "onnxruntime-tools>=1.4.2", "onnxruntime>=1.4.0", "opencv-python", "optuna", "optax>=0.0.8,<=0.1.4", "packaging>=20.0", "parameterized", "phonemizer", "protobuf", "psutil", "pyyaml>=5.1", "pydantic", "pytest>=7.2.0,<8.0.0", "pytest-timeout", "pytest-xdist", "python>=3.8.0", "ray[tune]>=2.7.0", "regex!=2019.12.17", "requests", "rhoknp>=1.1.0,<1.3.1", "rjieba", "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1", "ruff==0.1.5", "sacrebleu>=1.4.12,<2.0.0", "sacremoses", "safetensors>=0.4.1", "sagemaker>=2.31.0", "scikit-learn", "sentencepiece>=0.1.91,!=0.1.92", "sigopt", "starlette", "sudachipy>=0.6.6", "sudachidict_core>=20220729", "tensorboard", # TensorFlow pin. When changing this value, update examples/tensorflow/_tests_requirements.txt accordingly "tensorflow-cpu>=2.6,<2.16", "tensorflow>=2.6,<2.16", "tensorflow-text<2.16", "tf2onnx", "timeout-decorator", "timm", "tokenizers>=0.14,<0.19", "torch", "torchaudio", "torchvision", "pyctcdecode>=0.4.0", "tqdm>=4.27", "unidic>=1.0.2", "unidic_lite>=1.0.7", "urllib3<2.0.0", "uvicorn", ] # this is a lookup table with items like: # # tokenizers: "tokenizers==0.9.4" # packaging: "packaging" # # some of the values are versioned whereas others aren't. deps = {b: a for a, b in (re.findall(r"^(([^!=<>~ ]+)(?:[!=<>~ ].*)?$)", x)[0] for x in _deps)} # since we save this data in src/transformers/dependency_versions_table.py it can be easily accessed from # anywhere. If you need to quickly access the data from this table in a shell, you can do so easily with: # # python -c 'import sys; from transformers.dependency_versions_table import deps; \ # print(" ".join([ deps[x] for x in sys.argv[1:]]))' tokenizers datasets # # Just pass the desired package names to that script as it's shown with 2 packages above. 
# # If transformers is not yet installed and the work is done from the cloned repo remember to add `PYTHONPATH=src` to the script above # # You can then feed this for example to `pip`: # # pip install -U $(python -c 'import sys; from transformers.dependency_versions_table import deps; \ # print(" ".join([deps[x] for x in sys.argv[1:]]))' tokenizers datasets) # def deps_list(*pkgs): return [deps[pkg] for pkg in pkgs] class DepsTableUpdateCommand(Command): """ A custom distutils command that updates the dependency table. usage: python setup.py deps_table_update """ description = "build runtime dependency table" user_options = [ # format: (long option, short option, description). ("dep-table-update", None, "updates src/transformers/dependency_versions_table.py"), ] def initialize_options(self): pass def finalize_options(self): pass def run(self): entries = "\n".join([f' "{k}": "{v}",' for k, v in deps.items()]) content = [ "# THIS FILE HAS BEEN AUTOGENERATED. To update:", "# 1. modify the `_deps` dict in setup.py", "# 2. run `make deps_table_update``", "deps = {", entries, "}", "", ] target = "src/transformers/dependency_versions_table.py" print(f"updating {target}") with open(target, "w", encoding="utf-8", newline="\n") as f: f.write("\n".join(content)) extras = {} extras["ja"] = deps_list("fugashi", "ipadic", "unidic_lite", "unidic", "sudachipy", "sudachidict_core", "rhoknp") extras["sklearn"] = deps_list("scikit-learn") extras["tf"] = deps_list("tensorflow", "onnxconverter-common", "tf2onnx", "tensorflow-text", "keras-nlp") extras["tf-cpu"] = deps_list("tensorflow-cpu", "onnxconverter-common", "tf2onnx", "tensorflow-text", "keras-nlp") extras["torch"] = deps_list("torch", "accelerate") extras["accelerate"] = deps_list("accelerate") if os.name == "nt": # windows extras["retrieval"] = deps_list("datasets") # faiss is not supported on windows extras["flax"] = [] # jax is not supported on windows else: extras["retrieval"] = deps_list("faiss-cpu", "datasets") extras["flax"] = deps_list("jax", "jaxlib", "flax", "optax") extras["tokenizers"] = deps_list("tokenizers") extras["ftfy"] = deps_list("ftfy") extras["onnxruntime"] = deps_list("onnxruntime", "onnxruntime-tools") extras["onnx"] = deps_list("onnxconverter-common", "tf2onnx") + extras["onnxruntime"] extras["modelcreation"] = deps_list("cookiecutter") extras["sagemaker"] = deps_list("sagemaker") extras["deepspeed"] = deps_list("deepspeed") + extras["accelerate"] extras["optuna"] = deps_list("optuna") extras["ray"] = deps_list("ray[tune]") extras["sigopt"] = deps_list("sigopt") extras["integrations"] = extras["optuna"] + extras["ray"] + extras["sigopt"] extras["serving"] = deps_list("pydantic", "uvicorn", "fastapi", "starlette") extras["audio"] = deps_list("librosa", "pyctcdecode", "phonemizer", "kenlm") # `pip install ".[speech]"` is deprecated and `pip install ".[torch-speech]"` should be used instead extras["speech"] = deps_list("torchaudio") + extras["audio"] extras["torch-speech"] = deps_list("torchaudio") + extras["audio"] extras["tf-speech"] = extras["audio"] extras["flax-speech"] = extras["audio"] extras["vision"] = deps_list("Pillow") extras["timm"] = deps_list("timm") extras["torch-vision"] = deps_list("torchvision") + extras["vision"] extras["natten"] = deps_list("natten") extras["codecarbon"] = deps_list("codecarbon") extras["video"] = deps_list("decord", "av") extras["sentencepiece"] = deps_list("sentencepiece", "protobuf") extras["testing"] = ( deps_list( "pytest", "pytest-xdist", "timeout-decorator", "parameterized", "psutil", 
"datasets", "dill", "evaluate", "pytest-timeout", "ruff", "sacrebleu", "rouge-score", "nltk", "GitPython", "hf-doc-builder", "protobuf", # Can be removed once we can unpin protobuf "sacremoses", "rjieba", "beautifulsoup4", "tensorboard", "pydantic", "sentencepiece", ) + extras["retrieval"] + extras["modelcreation"] ) extras["deepspeed-testing"] = extras["deepspeed"] + extras["testing"] + extras["optuna"] + extras["sentencepiece"] extras["quality"] = deps_list("datasets", "isort", "ruff", "GitPython", "hf-doc-builder", "urllib3") extras["all"] = ( extras["tf"] + extras["torch"] + extras["flax"] + extras["sentencepiece"] + extras["tokenizers"] + extras["torch-speech"] + extras["vision"] + extras["integrations"] + extras["timm"] + extras["torch-vision"] + extras["codecarbon"] + extras["accelerate"] + extras["video"] ) # Might need to add doc-builder and some specific deps in the future extras["docs_specific"] = ["hf-doc-builder"] # "docs" needs "all" to resolve all the references extras["docs"] = extras["all"] + extras["docs_specific"] extras["dev-torch"] = ( extras["testing"] + extras["torch"] + extras["sentencepiece"] + extras["tokenizers"] + extras["torch-speech"] + extras["vision"] + extras["integrations"] + extras["timm"] + extras["torch-vision"] + extras["codecarbon"] + extras["quality"] + extras["ja"] + extras["docs_specific"] + extras["sklearn"] + extras["modelcreation"] + extras["onnxruntime"] ) extras["dev-tensorflow"] = ( extras["testing"] + extras["tf"] + extras["sentencepiece"] + extras["tokenizers"] + extras["vision"] + extras["quality"] + extras["docs_specific"] + extras["sklearn"] + extras["modelcreation"] + extras["onnx"] + extras["tf-speech"] ) extras["dev"] = ( extras["all"] + extras["testing"] + extras["quality"] + extras["ja"] + extras["docs_specific"] + extras["sklearn"] + extras["modelcreation"] ) extras["torchhub"] = deps_list( "filelock", "huggingface-hub", "importlib_metadata", "numpy", "packaging", "protobuf", "regex", "requests", "sentencepiece", "torch", "tokenizers", "tqdm", ) extras["agents"] = deps_list( "diffusers", "accelerate", "datasets", "torch", "sentencepiece", "opencv-python", "Pillow" ) # when modifying the following list, make sure to update src/transformers/dependency_versions_check.py install_requires = [ deps["filelock"], # filesystem locks, e.g., to prevent parallel downloads deps["huggingface-hub"], deps["numpy"], deps["packaging"], # utilities from PyPA to e.g., compare versions deps["pyyaml"], # used for the model cards metadata deps["regex"], # for OpenAI GPT deps["requests"], # for downloading models over HTTPS deps["tokenizers"], deps["safetensors"], deps["tqdm"], # progress bars in model download and training scripts ] setup( name="transformers", version="4.40.0.dev0", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots) author="The Hugging Face team (past and future) with the help of all our contributors (https://github.com/huggingface/transformers/graphs/contributors)", author_email="[email protected]", description="State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow", long_description=open("README.md", "r", encoding="utf-8").read(), long_description_content_type="text/markdown", keywords="NLP vision speech deep learning transformer pytorch tensorflow jax BERT GPT-2 Wav2Vec2 ViT", license="Apache 2.0 License", url="https://github.com/huggingface/transformers", package_dir={"": "src"}, packages=find_packages("src"), include_package_data=True, package_data={"": ["**/*.cu", "**/*.cpp", 
"**/*.cuh", "**/*.h", "**/*.pyx"]}, zip_safe=False, extras_require=extras, entry_points={"console_scripts": ["transformers-cli=transformers.commands.transformers_cli:main"]}, python_requires=">=3.8.0", install_requires=list(install_requires), classifiers=[ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Intended Audience :: Education", "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Topic :: Scientific/Engineering :: Artificial Intelligence", ], cmdclass={"deps_table_update": DepsTableUpdateCommand}, )
transformers/setup.py/0
{ "file_path": "transformers/setup.py", "repo_id": "transformers", "token_count": 6420 }
321
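The one-liner that builds the `deps` lookup table above is easy to misread; here is a self-contained sketch of what the regex extracts from a few representative requirement strings:

```python
# The regex captures (full requirement spec, bare package name) so deps maps name -> spec.
import re

_deps = ["tokenizers>=0.14,<0.19", "packaging>=20.0", "ray[tune]>=2.7.0", "filelock"]
deps = {b: a for a, b in (re.findall(r"^(([^!=<>~ ]+)(?:[!=<>~ ].*)?$)", x)[0] for x in _deps)}

print(deps["tokenizers"])  # tokenizers>=0.14,<0.19
print(deps["ray[tune]"])   # ray[tune]>=2.7.0
print(deps["filelock"])    # filelock  (unversioned entries map to themselves)
```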
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from argparse import ArgumentParser, Namespace

from ..utils import logging
from . import BaseTransformersCLICommand


def convert_command_factory(args: Namespace):
    """
    Factory function used to convert a TensorFlow 1.0 model checkpoint to a PyTorch checkpoint.

    Returns: ConvertCommand
    """
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = """
transformers can only be used from the command line to convert TensorFlow models to PyTorch checkpoints. In that case,
it requires TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""


class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the transformers-cli

        Args:
            parser: Root parser to register command-specific arguments
        """
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
) train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.") train_parser.add_argument( "--finetuning_task_name", type=str, default=None, help="Optional fine-tuning task name if the TF model was a finetuned model.", ) train_parser.set_defaults(func=convert_command_factory) def __init__( self, model_type: str, tf_checkpoint: str, pytorch_dump_output: str, config: str, finetuning_task_name: str, *args, ): self._logger = logging.get_logger("transformers-cli/converting") self._logger.info(f"Loading model {model_type}") self._model_type = model_type self._tf_checkpoint = tf_checkpoint self._pytorch_dump_output = pytorch_dump_output self._config = config self._finetuning_task_name = finetuning_task_name def run(self): if self._model_type == "albert": try: from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(IMPORT_ERROR_MESSAGE) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output) elif self._model_type == "bert": try: from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(IMPORT_ERROR_MESSAGE) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output) elif self._model_type == "funnel": try: from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(IMPORT_ERROR_MESSAGE) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output) elif self._model_type == "t5": try: from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: raise ImportError(IMPORT_ERROR_MESSAGE) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output) elif self._model_type == "gpt": from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( convert_openai_checkpoint_to_pytorch, ) convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output) elif self._model_type == "gpt2": try: from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import ( convert_gpt2_checkpoint_to_pytorch, ) except ImportError: raise ImportError(IMPORT_ERROR_MESSAGE) convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output) elif self._model_type == "xlnet": try: from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: raise ImportError(IMPORT_ERROR_MESSAGE) convert_xlnet_checkpoint_to_pytorch( self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name ) elif self._model_type == "xlm": from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output) elif self._model_type == "lxmert": from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output) elif self._model_type == "rembert": from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( convert_rembert_tf_checkpoint_to_pytorch, ) 
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output) else: raise ValueError("--model_type should be selected in the list [albert, bert, funnel, t5, gpt, gpt2, xlnet, xlm, lxmert, rembert]")
transformers/src/transformers/commands/convert.py/0
{ "file_path": "transformers/src/transformers/commands/convert.py", "repo_id": "transformers", "token_count": 3159 }
322
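The convert.py entry above registers a `convert` subcommand whose parsed arguments are turned into a command object by a factory and dispatched per architecture in `run()`. The self-contained sketch below illustrates that subcommand/factory wiring; `DummyConvertCommand` and `demo-cli` are hypothetical stand-ins, and only the registered flags are taken from the file.

```python
# Illustrative sketch of the argparse subcommand + factory pattern used by ConvertCommand.
# `DummyConvertCommand` and `demo-cli` are hypothetical stand-ins, not transformers classes.
from argparse import ArgumentParser, Namespace


class DummyConvertCommand:
    def __init__(self, model_type: str, tf_checkpoint: str, pytorch_dump_output: str):
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output

    def run(self):
        # The real command dispatches to a model-specific convert_*_checkpoint_to_pytorch function here.
        print(f"Would convert {self._model_type} checkpoint {self._tf_checkpoint} to {self._pytorch_dump_output}")


def convert_command_factory(args: Namespace):
    return DummyConvertCommand(args.model_type, args.tf_checkpoint, args.pytorch_dump_output)


if __name__ == "__main__":
    parser = ArgumentParser("demo-cli")
    subparsers = parser.add_subparsers()
    convert_parser = subparsers.add_parser("convert")
    convert_parser.add_argument("--model_type", type=str, required=True)
    convert_parser.add_argument("--tf_checkpoint", type=str, required=True)
    convert_parser.add_argument("--pytorch_dump_output", type=str, required=True)
    convert_parser.set_defaults(func=convert_command_factory)

    args = parser.parse_args(
        ["convert", "--model_type", "bert", "--tf_checkpoint", "model.ckpt", "--pytorch_dump_output", "out/"]
    )
    args.func(args).run()
```

With the console entry point declared in the setup.py entry above, the equivalent real invocation would look roughly like `transformers-cli convert --model_type bert --tf_checkpoint <path> --config <path> --pytorch_dump_output <path>`.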
# THIS FILE HAS BEEN AUTOGENERATED. To update: # 1. modify the `_deps` dict in setup.py # 2. run `make deps_table_update`` deps = { "Pillow": "Pillow>=10.0.1,<=15.0", "accelerate": "accelerate>=0.21.0", "av": "av==9.2.0", "beautifulsoup4": "beautifulsoup4", "codecarbon": "codecarbon==1.2.0", "cookiecutter": "cookiecutter==1.7.3", "dataclasses": "dataclasses", "datasets": "datasets!=2.5.0", "decord": "decord==0.6.0", "deepspeed": "deepspeed>=0.9.3", "diffusers": "diffusers", "dill": "dill<0.3.5", "evaluate": "evaluate>=0.2.0", "faiss-cpu": "faiss-cpu", "fastapi": "fastapi", "filelock": "filelock", "flax": "flax>=0.4.1,<=0.7.0", "fsspec": "fsspec<2023.10.0", "ftfy": "ftfy", "fugashi": "fugashi>=1.0", "GitPython": "GitPython<3.1.19", "hf-doc-builder": "hf-doc-builder>=0.3.0", "huggingface-hub": "huggingface-hub>=0.19.3,<1.0", "importlib_metadata": "importlib_metadata", "ipadic": "ipadic>=1.0.0,<2.0", "isort": "isort>=5.5.4", "jax": "jax>=0.4.1,<=0.4.13", "jaxlib": "jaxlib>=0.4.1,<=0.4.13", "jieba": "jieba", "kenlm": "kenlm", "keras": "keras<2.16", "keras-nlp": "keras-nlp>=0.3.1", "librosa": "librosa", "nltk": "nltk", "natten": "natten>=0.14.6,<0.15.0", "numpy": "numpy>=1.17", "onnxconverter-common": "onnxconverter-common", "onnxruntime-tools": "onnxruntime-tools>=1.4.2", "onnxruntime": "onnxruntime>=1.4.0", "opencv-python": "opencv-python", "optuna": "optuna", "optax": "optax>=0.0.8,<=0.1.4", "packaging": "packaging>=20.0", "parameterized": "parameterized", "phonemizer": "phonemizer", "protobuf": "protobuf", "psutil": "psutil", "pyyaml": "pyyaml>=5.1", "pydantic": "pydantic", "pytest": "pytest>=7.2.0,<8.0.0", "pytest-timeout": "pytest-timeout", "pytest-xdist": "pytest-xdist", "python": "python>=3.8.0", "ray[tune]": "ray[tune]>=2.7.0", "regex": "regex!=2019.12.17", "requests": "requests", "rhoknp": "rhoknp>=1.1.0,<1.3.1", "rjieba": "rjieba", "rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1", "ruff": "ruff==0.1.5", "sacrebleu": "sacrebleu>=1.4.12,<2.0.0", "sacremoses": "sacremoses", "safetensors": "safetensors>=0.4.1", "sagemaker": "sagemaker>=2.31.0", "scikit-learn": "scikit-learn", "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92", "sigopt": "sigopt", "starlette": "starlette", "sudachipy": "sudachipy>=0.6.6", "sudachidict_core": "sudachidict_core>=20220729", "tensorboard": "tensorboard", "tensorflow-cpu": "tensorflow-cpu>=2.6,<2.16", "tensorflow": "tensorflow>=2.6,<2.16", "tensorflow-text": "tensorflow-text<2.16", "tf2onnx": "tf2onnx", "timeout-decorator": "timeout-decorator", "timm": "timm", "tokenizers": "tokenizers>=0.14,<0.19", "torch": "torch", "torchaudio": "torchaudio", "torchvision": "torchvision", "pyctcdecode": "pyctcdecode>=0.4.0", "tqdm": "tqdm>=4.27", "unidic": "unidic>=1.0.2", "unidic_lite": "unidic_lite>=1.0.7", "urllib3": "urllib3<2.0.0", "uvicorn": "uvicorn", }
transformers/src/transformers/dependency_versions_table.py/0
{ "file_path": "transformers/src/transformers/dependency_versions_table.py", "repo_id": "transformers", "token_count": 1755 }
323
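The setup.py entry notes that `install_requires` changes must be mirrored in src/transformers/dependency_versions_check.py, which consumes the pinned table above. That checker is not reproduced in this dump; the sketch below only shows one plausible way such a table can be validated at runtime with `importlib.metadata` and `packaging`, using a hand-picked subset of entries, and `check_pinned_version` is a hypothetical helper name.

```python
# Sketch: validate installed packages against pinned specifiers like those in the table above.
# The real dependency_versions_check.py may differ; this only illustrates the general approach.
from importlib.metadata import PackageNotFoundError, version

from packaging.requirements import Requirement

deps = {
    "numpy": "numpy>=1.17",
    "packaging": "packaging>=20.0",
    "tqdm": "tqdm>=4.27",
}


def check_pinned_version(name: str) -> None:
    req = Requirement(deps[name])
    try:
        installed = version(req.name)
    except PackageNotFoundError:
        raise ImportError(f"{name} is required but not installed (need {deps[name]})")
    if not req.specifier.contains(installed, prereleases=True):
        raise ImportError(f"{name}=={installed} does not satisfy the pin {deps[name]}")


if __name__ == "__main__":
    for pkg in deps:
        check_pinned_version(pkg)
        print(f"{pkg}: OK")
```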
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import inspect import warnings from dataclasses import dataclass from typing import Any, Dict, Optional, Tuple, Union import numpy as np import tensorflow as tf from tensorflow.compiler.tf2xla.python.xla import dynamic_update_slice from ..modeling_tf_outputs import TFCausalLMOutputWithPast, TFSeq2SeqLMOutput from ..models.auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, TF_MODEL_FOR_VISION_2_SEQ_MAPPING, ) from ..tf_utils import shape_list, stable_softmax from ..utils import ModelOutput, logging from .configuration_utils import GenerationConfig from .tf_logits_process import ( TFForcedBOSTokenLogitsProcessor, TFForcedEOSTokenLogitsProcessor, TFForceTokensLogitsProcessor, TFLogitsProcessorList, TFMinLengthLogitsProcessor, TFNoBadWordsLogitsProcessor, TFNoRepeatNGramLogitsProcessor, TFRepetitionPenaltyLogitsProcessor, TFSuppressTokensAtBeginLogitsProcessor, TFSuppressTokensLogitsProcessor, TFTemperatureLogitsWarper, TFTopKLogitsWarper, TFTopPLogitsWarper, ) logger = logging.get_logger(__name__) @dataclass class TFGreedySearchDecoderOnlyOutput(ModelOutput): """ Base class for outputs of decoder-only generation models using greedy search. Args: sequences (`tf.Tensor` of shape `(batch_size, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token), with each tensor of shape `(batch_size, config.vocab_size)`. attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size, generated_length, hidden_size)`. 
""" sequences: tf.Tensor = None scores: Optional[Tuple[tf.Tensor]] = None attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None @dataclass class TFGreedySearchEncoderDecoderOutput(ModelOutput): """ Base class for outputs of encoder-decoder generation models using greedy search. Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes) Args: sequences (`tf.Tensor` of shape `(batch_size, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token), with each tensor of shape `(batch_size, config.vocab_size)`. encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer of the decoder) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. decoder_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. cross_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. decoder_hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size, generated_length, hidden_size)`. """ sequences: tf.Tensor = None scores: Optional[Tuple[tf.Tensor]] = None encoder_attentions: Optional[Tuple[tf.Tensor]] = None encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None decoder_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None cross_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None decoder_hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None @dataclass class TFSampleDecoderOnlyOutput(ModelOutput): """ Base class for outputs of decoder-only generation models using sampling. Args: sequences (`tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. 
scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token), with each tensor of shape `(batch_size*num_return_sequences, config.vocab_size)`. attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(num_return_sequences*batch_size, num_heads, generated_length, sequence_length)`. hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(num_return_sequences*batch_size, generated_length, hidden_size)`. """ sequences: tf.Tensor = None scores: Optional[Tuple[tf.Tensor]] = None attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None @dataclass class TFSampleEncoderDecoderOutput(ModelOutput): """ Base class for outputs of encoder-decoder generation models using sampling. Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes) Args: sequences (`tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token), with each tensor of shape `(batch_size*num_return_sequences, config.vocab_size)`. encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer of the decoder) of shape `(batch_size*num_return_sequences, num_heads, sequence_length, sequence_length)`. encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size*num_return_sequences, sequence_length, hidden_size)`. decoder_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size*num_return_sequences, num_heads, generated_length, sequence_length)`. 
cross_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. decoder_hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size*num_return_sequences, generated_length, hidden_size)`. """ sequences: tf.Tensor = None scores: Optional[Tuple[tf.Tensor]] = None encoder_attentions: Optional[Tuple[tf.Tensor]] = None encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None decoder_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None cross_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None decoder_hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None @dataclass class TFBeamSearchDecoderOnlyOutput(ModelOutput): """ Base class for outputs of decoder-only generation models using beam search. Args: sequences (`tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. sequences_scores (`tf.Tensor` of shape `(batch_size*num_return_sequences)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Final beam scores of the generated `sequences`. scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this beam. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token), with each tensor of shape `(batch_size*num_beams*num_return_sequences, config.vocab_size)`. beam_indices (`tf.Tensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Beam indices of generated token id at each generation step. `tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`. attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size*num_beams, num_heads, generated_length, sequence_length)`. hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)`. """ sequences: tf.Tensor = None sequences_scores: Optional[tf.Tensor] = None scores: Optional[Tuple[tf.Tensor]] = None beam_indices: Optional[tf.Tensor] = None attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None @dataclass class TFBeamSearchEncoderDecoderOutput(ModelOutput): """ Base class for outputs of encoder-decoder generation models using beam search. 
Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes) Args: sequences (`tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. sequences_scores (`tf.Tensor` of shape `(batch_size*num_return_sequences)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Final beam scores of the generated `sequences`. scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this beam. `Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token), with each tensor of shape `(batch_size*num_beams, config.vocab_size)`. beam_indices (`tf.Tensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Beam indices of generated token id at each generation step. `tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`. encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer of the decoder) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size*num_beams*num_return_sequences, sequence_length, hidden_size)`. decoder_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size*num_beams*num_return_sequences, num_heads, generated_length, sequence_length)`. cross_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. decoder_hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)`. 
""" sequences: tf.Tensor = None sequences_scores: Optional[tf.Tensor] = None scores: Optional[Tuple[tf.Tensor]] = None beam_indices: Optional[tf.Tensor] = None encoder_attentions: Optional[Tuple[tf.Tensor]] = None encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None decoder_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None cross_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None decoder_hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None @dataclass class TFBeamSampleDecoderOnlyOutput(ModelOutput): """ Base class for outputs of decoder-only generation models using beam sample. Args: sequences (`tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. sequences_scores (`tf.Tensor` of shape `(batch_size * num_return_sequence)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Final beam scores of the generated `sequences`. scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this beam. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token), with each tensor of shape `(batch_size*num_beams*num_return_sequences, config.vocab_size)`. beam_indices (`tf.Tensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Beam indices of generated token id at each generation step. `tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`. attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size*num_beams, num_heads, generated_length, sequence_length)`. hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size*num_beams, generated_length, hidden_size)`. """ sequences: tf.Tensor = None sequences_scores: Optional[tf.Tensor] = None scores: Optional[Tuple[tf.Tensor]] = None beam_indices: Optional[tf.Tensor] = None attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None @dataclass class TFBeamSampleEncoderDecoderOutput(ModelOutput): """ Base class for outputs of encoder-decoder generation models using beam sampling. Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes) Args: sequences (`tf.Tensor` of shape `(batch_size*num_beams, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. 
sequences_scores (`tf.Tensor` of shape `(batch_size * num_return_sequence)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Final beam scores of the generated `sequences`. scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this beam. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token), with each tensor of shape `(batch_size*num_beams, config.vocab_size)`. beam_indices (`tf.Tensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Beam indices of generated token id at each generation step. `tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`. encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer of the decoder) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size*num_beams, sequence_length, hidden_size)`. decoder_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size*num_beams, num_heads, generated_length, sequence_length)`. cross_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. decoder_hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size*num_beams, generated_length, hidden_size)`. """ sequences: tf.Tensor = None sequences_scores: Optional[tf.Tensor] = None scores: Optional[Tuple[tf.Tensor]] = None beam_indices: Optional[tf.Tensor] = None encoder_attentions: Optional[Tuple[tf.Tensor]] = None encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None decoder_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None cross_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None decoder_hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None @dataclass class TFContrastiveSearchDecoderOnlyOutput(ModelOutput): """ Base class for outputs of decoder-only generation models using contrastive search. Args: sequences (`tf.Tensor` of shape `(batch_size, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. 
scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token), with each tensor of shape `(batch_size, config.vocab_size)`. attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size, generated_length, hidden_size)`. """ sequences: tf.Tensor = None scores: Optional[Tuple[tf.Tensor]] = None attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None @dataclass class TFContrastiveSearchEncoderDecoderOutput(ModelOutput): """ Base class for outputs of encoder-decoder generation models using contrastive search. Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes) Args: sequences (`tf.Tensor` of shape `(batch_size, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token), with each tensor of shape `(batch_size, config.vocab_size)`. encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer of the decoder) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. decoder_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. cross_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. 
decoder_hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `tf.Tensor` of shape `(batch_size, generated_length, hidden_size)`. """ sequences: tf.Tensor = None scores: Optional[Tuple[tf.Tensor]] = None encoder_attentions: Optional[Tuple[tf.Tensor]] = None encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None decoder_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None cross_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None decoder_hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None TFGreedySearchOutput = Union[TFGreedySearchEncoderDecoderOutput, TFGreedySearchDecoderOnlyOutput] TFSampleOutput = Union[TFSampleEncoderDecoderOutput, TFSampleDecoderOnlyOutput] TFBeamSearchOutput = Union[TFBeamSearchEncoderDecoderOutput, TFBeamSearchDecoderOnlyOutput] TFBeamSampleOutput = Union[TFBeamSampleEncoderDecoderOutput, TFBeamSampleDecoderOnlyOutput] TFContrastiveSearchOutput = Union[TFContrastiveSearchEncoderDecoderOutput, TFContrastiveSearchDecoderOnlyOutput] TFGenerateOutput = Union[ TFGreedySearchOutput, TFSampleOutput, TFBeamSearchOutput, TFBeamSampleOutput, TFContrastiveSearchOutput ] class TFGenerationMixin: """ A class containing all of the functions supporting generation, to be used as a mixin in [`TFPreTrainedModel`]. The class exposes [`~generation.TFGenerationMixin.generate`], which can be used for: - *greedy decoding* by calling [`~generation.TFGenerationMixin.greedy_search`] if `num_beams=1` and `do_sample=False` - *contrastive search* by calling [`~generation.TFGenerationMixin.contrastive_search`] if `penalty_alpha>0` and `top_k>1` - *multinomial sampling* by calling [`~generation.TFGenerationMixin.sample`] if `num_beams=1` and `do_sample=True` - *beam-search decoding* by calling [`~generation.TFGenerationMixin.beam_search`] if `num_beams>1` You do not need to call any of the above methods directly. Pass custom parameter values to 'generate' instead. To learn more about decoding strategies refer to the [text generation strategies guide](../generation_strategies). """ _seed_generator = None @property def seed_generator(self): warnings.warn("`seed_generator` is deprecated and will be removed in a future version.", UserWarning) if self._seed_generator is None: self._seed_generator = tf.random.Generator.from_non_deterministic_state() return self._seed_generator supports_xla_generation = True def prepare_inputs_for_generation(self, *args, **kwargs): raise NotImplementedError( "A model class needs to define a `prepare_inputs_for_generation` method in order to use `generate`." ) def compute_transition_scores( self, sequences: tf.Tensor, scores: Tuple[tf.Tensor], beam_indices: Optional[tf.Tensor] = None, normalize_logits: bool = False, ) -> tf.Tensor: """ Computes the transition scores of sequences given the generation scores (and beam indices, if beam search was used). This is a convenient method to quicky obtain the scores of the selected tokens at generation time. Parameters: sequences (`tf.Tensor`): The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. scores (`tuple(tf.Tensor)`): Transition scores for each vocabulary token at each generation step. 
Beam transition scores consisting of log probabilities of tokens conditioned on log softmax of previously generated tokens Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token), with each tensor of shape `(batch_size*num_beams, config.vocab_size)`. beam_indices (`tf.Tensor`, *optional*): Beam indices of generated token id at each generation step. `tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`. Only required if a `num_beams>1` at generate-time. normalize_logits (`bool`, *optional*, defaults to `False`): Whether to normalize the logits (which, for legacy reasons, may be unnormalized). Return: `tf.Tensor`: A `tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)` containing the transition scores (logits) Examples: ```python >>> from transformers import GPT2Tokenizer, TFAutoModelForCausalLM >>> import numpy as np >>> tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2") >>> model = TFAutoModelForCausalLM.from_pretrained("openai-community/gpt2") >>> tokenizer.pad_token_id = tokenizer.eos_token_id >>> inputs = tokenizer(["Today is"], return_tensors="tf") >>> # Example 1: Print the scores for each token generated with Greedy Search >>> outputs = model.generate(**inputs, max_new_tokens=5, return_dict_in_generate=True, output_scores=True) >>> transition_scores = model.compute_transition_scores( ... outputs.sequences, outputs.scores, normalize_logits=True ... ) >>> # input_length is the length of the input prompt for decoder-only models, like the GPT family, and 1 for >>> # encoder-decoder models, like BART or T5. >>> input_length = 1 if model.config.is_encoder_decoder else inputs.input_ids.shape[1] >>> generated_tokens = outputs.sequences[:, input_length:] >>> for tok, score in zip(generated_tokens[0], transition_scores[0]): ... # | token | token string | logits | probability ... print(f"| {tok:5d} | {tokenizer.decode(tok):8s} | {score.numpy():.3f} | {np.exp(score.numpy()):.2%}") | 262 | the | -1.413 | 24.33% | 1110 | day | -2.609 | 7.36% | 618 | when | -2.009 | 13.41% | 356 | we | -1.859 | 15.58% | 460 | can | -2.508 | 8.14% >>> # Example 2: Reconstruct the sequence scores from Beam Search >>> outputs = model.generate( ... **inputs, ... max_new_tokens=5, ... num_beams=4, ... num_return_sequences=4, ... return_dict_in_generate=True, ... output_scores=True, ... ) >>> transition_scores = model.compute_transition_scores( ... outputs.sequences, outputs.scores, outputs.beam_indices, normalize_logits=False ... ) >>> # If you sum the generated tokens' scores and apply the length penalty, you'll get the sequence scores. >>> # Tip: recomputing the scores is only guaranteed to match with `normalize_logits=False`. Depending on the >>> # use case, you might want to recompute it with `normalize_logits=True`. >>> output_length = input_length + np.sum(transition_scores.numpy() < 0, axis=1) >>> length_penalty = model.generation_config.length_penalty >>> reconstructed_scores = np.sum(transition_scores, axis=1) / (output_length**length_penalty) >>> print(np.allclose(outputs.sequences_scores, reconstructed_scores)) True ```""" # 1. In absence of `beam_indices`, we can assume that we come from e.g. greedy search, which is equivalent # to a beam search approach were the first (and only) beam is always selected if beam_indices is None: beam_indices = tf.tile(tf.expand_dims(tf.range(scores[0].shape[0]), axis=1), [1, len(scores)]) # 2. 
reshape scores as [batch_size, vocab_size, # generation steps] with # generation steps being # seq_len - input_length scores = tf.transpose(tf.reshape(tf.stack(scores), (len(scores), -1)), (1, 0)) scores = tf.reshape(scores, (-1, self.config.vocab_size, scores.shape[-1])) # 3. Optionally normalize the logits (across the vocab dimension) if normalize_logits: scores = tf.nn.log_softmax(scores, axis=1) # 4. cut beam_indices to longest beam length beam_indices_mask = beam_indices < 0 max_beam_length = tf.math.reduce_max( tf.math.reduce_sum((1 - tf.cast(beam_indices_mask, dtype=tf.int32)), axis=-1) ) beam_indices = beam_indices[:, -max_beam_length:] beam_indices_mask = beam_indices_mask[:, -max_beam_length:] # 5. Set indices of beams that finished early to 0; such indices will be masked correctly afterwards beam_indices = tf.where(beam_indices_mask, 0, beam_indices) # 6. Define which indices contributed to scores cut_idx = sequences.shape[-1] - max_beam_length token_indices = sequences[:, cut_idx:] gen_step_idx = tf.broadcast_to(tf.range(scores.shape[-1]), token_indices.shape) indices = tf.stack([beam_indices, token_indices, gen_step_idx], axis=-1) # 7. Compute scores transition_scores = tf.gather_nd(scores, indices) # 8. Mask out transition_scores of beams that stopped early transition_scores = tf.where(beam_indices_mask, 0, transition_scores) return transition_scores def _validate_model_class(self): """ Confirms that the model class is compatible with generation. If not, raises an exception that points to the right class to use. """ if not self.can_generate(): generate_compatible_mappings = [ TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_VISION_2_SEQ_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, ] generate_compatible_classes = set() for model_mapping in generate_compatible_mappings: supported_models = model_mapping.get(type(self.config), default=None) if supported_models is not None: generate_compatible_classes.add(supported_models.__name__) exception_message = ( f"The current model class ({self.__class__.__name__}) is not compatible with `.generate()`, as " "it doesn't have a language model head." ) if generate_compatible_classes: exception_message += f" Please use one of the following classes instead: {generate_compatible_classes}" raise TypeError(exception_message) def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]): """Validates model kwargs for generation. Generate argument typos will also be caught here.""" # Excludes arguments that are handled before calling any model function if self.config.is_encoder_decoder: for key in ["decoder_input_ids"]: model_kwargs.pop(key, None) unused_model_args = [] model_args = set(inspect.signature(self.prepare_inputs_for_generation).parameters) # `kwargs`/`model_kwargs` is often used to handle optional forward pass inputs like `attention_mask`. 
If # `prepare_inputs_for_generation` doesn't accept them, then a stricter check can be made ;) if "kwargs" in model_args or "model_kwargs" in model_args: model_args |= set(inspect.signature(self.call).parameters) for key, value in model_kwargs.items(): if value is not None and key not in model_args: unused_model_args.append(key) if unused_model_args: raise ValueError( f"The following `model_kwargs` are not used by the model: {unused_model_args} (note: typos in the" " generate arguments will also show up in this list)" ) def generate( self, inputs: Optional[tf.Tensor] = None, generation_config: Optional[GenerationConfig] = None, logits_processor: Optional[TFLogitsProcessorList] = None, seed=None, **kwargs, ) -> Union[TFGenerateOutput, tf.Tensor]: r""" Generates sequences of token ids for models with a language modeling head. <Tip warning={true}> Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the model's default generation configuration. You can override any `generation_config` by passing the corresponding parameters to generate, e.g. `.generate(inputs, num_beams=4, do_sample=True)`. For an overview of generation strategies and code examples, check out the [following guide](../generation_strategies). </Tip> Parameters: inputs (`tf.Tensor` of varying shape depending on the modality, *optional*): The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs` should of in the format of `input_ids`. For encoder-decoder models *inputs* can represent any of `input_ids`, `input_values`, `input_features`, or `pixel_values`. generation_config (`~generation.GenerationConfig`, *optional*): The generation configuration to be used as base parametrization for the generation call. `**kwargs` passed to generate matching the attributes of `generation_config` will override them. If `generation_config` is not provided, the default will be used, which had the following loading priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s default values, whose documentation should be checked to parameterize generation. logits_processor (`LogitsProcessorList`, *optional*): Custom logits processors that complement the default logits processors built from arguments and generation config. If a logit processor is passed that is already created with the arguments or a generation config an error is thrown. This feature is intended for advanced users. seed (`List[int]`, *optional*): Random seed to control sampling, containing two integers, used when `do_sample` is `True`. See the `seed` argument from stateless functions in `tf.random`. kwargs (`Dict[str, Any]`, *optional*): Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*. Return: [`~utils.ModelOutput`] or `tf.Tensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True` or when `config.return_dict_in_generate=True`) or a `tf.Tensor`. 
If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible [`~utils.ModelOutput`] types are: - [`~generation.TFGreedySearchDecoderOnlyOutput`], - [`~generation.TFSampleDecoderOnlyOutput`], - [`~generation.TFBeamSearchDecoderOnlyOutput`], - [`~generation.TFBeamSampleDecoderOnlyOutput`] If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible [`~utils.ModelOutput`] types are: - [`~generation.TFGreedySearchEncoderDecoderOutput`], - [`~generation.TFSampleEncoderDecoderOutput`], - [`~generation.TFBeamSearchEncoderDecoderOutput`], - [`~generation.TFBeamSampleEncoderDecoderOutput`] """ # 1. Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call self._validate_model_class() # priority: `generation_config` argument > `model.generation_config` (the default generation config) if generation_config is None: # legacy: users may modify the model configuration to control generation. To trigger this legacy behavior, # two conditions must be met # 1) the generation config must have been created from the model config (`_from_model_config` field); # 2) the generation config must have seen no modification since its creation (the hash is the same). if self.generation_config._from_model_config and self.generation_config._original_object_hash == hash( self.generation_config ): new_generation_config = GenerationConfig.from_model_config(self.config) if new_generation_config != self.generation_config: warnings.warn( "You have modified the pretrained model configuration to control generation. This is a" " deprecated strategy to control generation and will be removed soon, in a future version." " Please use and modify the model generation configuration (see" " https://huggingface.co/docs/transformers/generation_strategies#default-text-generation-configuration )" ) self.generation_config = new_generation_config generation_config = self.generation_config generation_config = copy.deepcopy(generation_config) model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs self._validate_model_kwargs(model_kwargs.copy()) # 2. Cast input dtypes to tf.int32 unless they're floats (which happens for some image models) if inputs is not None: if isinstance(inputs, tf.Tensor) and inputs.dtype.is_floating: pass elif isinstance(inputs, np.ndarray) and np.issubdtype(inputs.dtype, np.floating): pass else: inputs = tf.cast(inputs, tf.int32) if model_kwargs.get("attention_mask") is not None: model_kwargs["attention_mask"] = tf.cast(model_kwargs["attention_mask"], tf.int32) if "decoder_input_ids" in model_kwargs: if ( isinstance(model_kwargs["decoder_input_ids"], tf.Tensor) and model_kwargs["decoder_input_ids"].dtype.is_floating ): pass elif isinstance(model_kwargs["decoder_input_ids"], np.ndarray) and np.issubdtype( model_kwargs["decoder_input_ids"].dtype, np.floating ): pass else: model_kwargs["decoder_input_ids"] = tf.cast(model_kwargs["decoder_input_ids"], tf.int32) # 3. Set generation parameters if not already defined logits_processor = logits_processor if logits_processor is not None else TFLogitsProcessorList() if generation_config.pad_token_id is None and generation_config.eos_token_id is not None: if model_kwargs.get("attention_mask") is None: logger.warning( "The attention mask and the pad token id were not set. As a consequence, you may observe " "unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results." 
) eos_token_id = generation_config.eos_token_id if isinstance(eos_token_id, list): eos_token_id = eos_token_id[0] logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.") generation_config.pad_token_id = eos_token_id use_xla = not tf.executing_eagerly() if use_xla and not self.supports_xla_generation: raise ValueError( "The selected model does not support Graph mode nor XLA generation (e.g. from tf.function())" ) # 4. Define model inputs inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs( inputs, generation_config.bos_token_id, model_kwargs ) # inputs_ids now has to be defined and cannot be None anymore batch_size = shape_list(inputs_tensor)[0] # 5. Prepare other model kwargs model_kwargs["output_attentions"] = generation_config.output_attentions model_kwargs["output_hidden_states"] = generation_config.output_hidden_states model_kwargs["use_cache"] = generation_config.use_cache accepts_attention_mask = "attention_mask" in set(inspect.signature(self.call).parameters.keys()) requires_attention_mask = "encoder_outputs" not in model_kwargs if model_kwargs.get("attention_mask", None) is None and requires_attention_mask and accepts_attention_mask: model_kwargs["attention_mask"] = self._prepare_attention_mask_for_generation( inputs_tensor, generation_config.pad_token_id, generation_config.eos_token_id ) # decoder-only models should use left-padding for generation if not self.config.is_encoder_decoder: if generation_config.pad_token_id is not None and tf.math.reduce_any( inputs_tensor[:, -1] == generation_config.pad_token_id ): logger.warning( "A decoder-only architecture is being used, but right-padding was detected! For correct " "generation results, please set `padding_side='left'` when initializing the tokenizer." ) if self.config.is_encoder_decoder and "encoder_outputs" not in model_kwargs: # if model is encoder decoder encoder_outputs are created and added to `model_kwargs` model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation( inputs_tensor, model_kwargs, model_input_name ) # 6. Prepare model inputs which will be used for auto-regressive generation if self.config.is_encoder_decoder: input_ids, model_kwargs = self._prepare_decoder_input_ids_for_generation( batch_size=batch_size, model_input_name=model_input_name, model_kwargs=model_kwargs, decoder_start_token_id=generation_config.decoder_start_token_id, bos_token_id=generation_config.bos_token_id, ) else: input_ids = inputs_tensor if model_input_name == "input_ids" else model_kwargs.pop("input_ids") # 7. Prepare `max_length` depending on other stopping criteria. input_ids_seq_length = shape_list(input_ids)[-1] has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None if has_default_max_length and generation_config.max_new_tokens is None and generation_config.max_length == 20: # 20 is the default max_length of the generation config warnings.warn( f"Using the model-agnostic default `max_length` (={generation_config.max_length}) " "to control the generation length. recommend setting `max_new_tokens` to control the maximum length of the generation.", UserWarning, ) elif generation_config.max_new_tokens is not None: if not has_default_max_length and generation_config.max_length is not None: logger.warning( f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(=" f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. 
" "Please refer to the documentation for more information. " "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)" ) generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length # If the input length is a tensor (i.e. dynamic length), skip length checks if not isinstance(input_ids_seq_length, tf.Tensor): if ( generation_config.min_length is not None and generation_config.min_length > generation_config.max_length ): raise ValueError( f"Unfeasable length constraints: the minimum length ({generation_config.min_length}) is larger" f" than the maximum length ({generation_config.max_length})" ) if input_ids_seq_length >= generation_config.max_length: input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids" logger.warning( f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to" f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider" " increasing`max_new_tokens`." ) # 8. determine generation mode is_contrastive_search_gen_mode = ( generation_config.top_k is not None and generation_config.top_k > 1 and generation_config.do_sample is False and generation_config.penalty_alpha is not None and generation_config.penalty_alpha > 0 ) is_greedy_gen_mode = ( not is_contrastive_search_gen_mode and (generation_config.num_beams == 1) and generation_config.do_sample is False ) is_beam_gen_mode = ( not is_contrastive_search_gen_mode and (generation_config.num_beams > 1) and generation_config.do_sample is False ) is_sample_gen_mode = (generation_config.num_beams == 1) and generation_config.do_sample is True is_beam_sample_gen_mode = (generation_config.num_beams > 1) and generation_config.do_sample is True # 9. prepare distribution pre_processing samplers logits_processor = self._get_logits_processor( generation_config=generation_config, input_ids_seq_length=input_ids_seq_length, logits_processor=logits_processor, ) # 10. go into different generation modes if is_greedy_gen_mode: if generation_config.num_return_sequences > 1: raise ValueError( f"num_return_sequences has to be 1, but is {generation_config.num_return_sequences} when doing" " greedy search." ) # 11. run greedy search return self.greedy_search( input_ids, max_length=generation_config.max_length, pad_token_id=generation_config.pad_token_id, eos_token_id=generation_config.eos_token_id, logits_processor=logits_processor, output_scores=generation_config.output_scores, return_dict_in_generate=generation_config.return_dict_in_generate, **model_kwargs, ) elif is_contrastive_search_gen_mode: if generation_config.num_return_sequences > 1: raise ValueError( f"num_return_sequences has to be 1, but is {generation_config.num_return_sequences} when doing" " contrastive search." ) # 11. run contrastive search return self.contrastive_search( input_ids, top_k=generation_config.top_k, penalty_alpha=generation_config.penalty_alpha, logits_processor=logits_processor, max_length=generation_config.max_length, pad_token_id=generation_config.pad_token_id, eos_token_id=generation_config.eos_token_id, output_scores=generation_config.output_scores, return_dict_in_generate=generation_config.return_dict_in_generate, **model_kwargs, ) elif is_sample_gen_mode: # 11. prepare logits warper logits_warper = self._get_logits_warper(generation_config=generation_config) # 12. 
expand input_ids with `num_return_sequences` additional sequences per batch input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids=input_ids, expand_size=generation_config.num_return_sequences, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs, ) # 13. run sample return self.sample( input_ids, logits_processor=logits_processor, logits_warper=logits_warper, max_length=generation_config.max_length, pad_token_id=generation_config.pad_token_id, eos_token_id=generation_config.eos_token_id, seed=seed, output_scores=generation_config.output_scores, return_dict_in_generate=generation_config.return_dict_in_generate, **model_kwargs, ) elif is_beam_gen_mode: if generation_config.num_beams < generation_config.num_return_sequences: raise ValueError( "Beam search decoding cannot return more sequences than it has beams. Please set num_beams >=" f" num_return_sequences, got {generation_config.num_beams} and" f" {generation_config.num_return_sequences} (respectively)" ) # 11. broadcast inputs to the desired number of beams input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids=input_ids, expand_size=generation_config.num_beams, is_encoder_decoder=self.config.is_encoder_decoder, expand_in_new_axis=True, **model_kwargs, ) # 12. run beam search return self.beam_search( input_ids, max_length=generation_config.max_length, pad_token_id=generation_config.pad_token_id, eos_token_id=generation_config.eos_token_id, length_penalty=generation_config.length_penalty, early_stopping=generation_config.early_stopping, logits_processor=logits_processor, output_scores=generation_config.output_scores, return_dict_in_generate=generation_config.return_dict_in_generate, num_return_sequences=generation_config.num_return_sequences, **model_kwargs, ) elif is_beam_sample_gen_mode: if generation_config.num_beams < generation_config.num_return_sequences: raise ValueError( "Beam search decoding cannot return more sequences than it has beams. Please set num_beams >=" f" num_return_sequences, got {generation_config.num_beams} and" f" {generation_config.num_return_sequences} (respectively)" ) # 11. prepare logits warper logits_warper = self._get_logits_warper(generation_config=generation_config) # 12. broadcast inputs to the desired number of beams input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids=input_ids, expand_size=generation_config.num_beams, is_encoder_decoder=self.config.is_encoder_decoder, expand_in_new_axis=True, **model_kwargs, ) # 13. 
run beam sample (beam search with sampling) return self.beam_search( input_ids, do_sample=True, max_length=generation_config.max_length, pad_token_id=generation_config.pad_token_id, eos_token_id=generation_config.eos_token_id, length_penalty=generation_config.length_penalty, early_stopping=generation_config.early_stopping, logits_processor=logits_processor, logits_warper=logits_warper, output_scores=generation_config.output_scores, return_dict_in_generate=generation_config.return_dict_in_generate, num_return_sequences=generation_config.num_return_sequences, **model_kwargs, ) def _prepare_attention_mask_for_generation( self, inputs: tf.Tensor, pad_token_id: Optional[int], eos_token_id: Optional[int], ) -> tf.Tensor: is_input_ids = len(inputs.shape) == 2 and inputs.dtype in (tf.int32, tf.int64) is_pad_token_in_inputs = (pad_token_id is not None) and tf.math.reduce_any(inputs == pad_token_id) is_pad_token_not_equal_to_eos_token_id = (eos_token_id is None) or (pad_token_id != eos_token_id) # Check if input is input_ids and padded -> only then is attention_mask defined if is_input_ids and is_pad_token_in_inputs and is_pad_token_not_equal_to_eos_token_id: return tf.cast(tf.math.not_equal(inputs, pad_token_id), dtype=tf.int32) else: return tf.ones(inputs.shape[:2], dtype=tf.int32) def _prepare_encoder_decoder_kwargs_for_generation( self, inputs_tensor: tf.Tensor, model_kwargs, model_input_name: Optional[str] = None ) -> Dict[str, Any]: # 1. get encoder and store encoder outputs encoder = self.get_encoder() # 2. prepare encoder args and encoder kwargs from model kwargs irrelevant_prefix = ["decoder_", "cross_attn", "use_cache"] encoder_kwargs = { argument: value for argument, value in model_kwargs.items() if not any(argument.startswith(p) for p in irrelevant_prefix) } encoder_signature = set(inspect.signature(encoder.call).parameters) encoder_accepts_wildcard = "kwargs" in encoder_signature or "model_kwargs" in encoder_signature if not encoder_accepts_wildcard: encoder_kwargs = { argument: value for argument, value in encoder_kwargs.items() if argument in encoder_signature } # 3. vision models don't use `attention_mask`. encoder_kwargs["return_dict"] = True encoder_kwargs[model_input_name] = inputs_tensor if model_input_name != self.main_input_name: # in Keras, the first input must always be passed encoder_kwargs[self.main_input_name] = None encoder_outputs = encoder(**encoder_kwargs) model_kwargs["encoder_outputs"] = encoder_outputs return model_kwargs def _prepare_decoder_input_ids_for_generation( self, batch_size: int, model_input_name: str, model_kwargs: Dict[str, tf.Tensor], decoder_start_token_id: int = None, bos_token_id: int = None, ) -> Tuple[tf.Tensor, Dict[str, tf.Tensor]]: """Prepares `decoder_input_ids` for generation with encoder-decoder models""" # 1. Check whether the user has defined `decoder_input_ids` manually. To facilitate in terms of input naming, # we also allow the user to pass it under `input_ids`, if the encoder does not use it as the main input. if model_kwargs is not None and "decoder_input_ids" in model_kwargs: decoder_input_ids = model_kwargs.pop("decoder_input_ids") elif "input_ids" in model_kwargs and model_input_name != "input_ids": decoder_input_ids = model_kwargs.pop("input_ids") else: decoder_input_ids = None # 2. Encoder-decoder models expect the `decoder_input_ids` to start with a special token. Let's ensure that. 
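# --- Illustrative aside (added for clarity; not part of the original module) ---
# A minimal, self-contained sketch of the seeding step described in the comment
# above, on a toy batch with an assumed `decoder_start_token_id` of 0. The real
# logic is the method code that follows; this aside only shows the tensor ops
# (it ignores the matching `decoder_attention_mask` adjustment).
import tensorflow as tf

decoder_start_token_id = 0  # assumed value, for illustration only
user_decoder_input_ids = tf.constant([[5, 6], [7, 8]], dtype=tf.int32)
batch_size = user_decoder_input_ids.shape[0]

# every decoder sequence is expected to begin with the decoder start token
decoder_input_ids_start = tf.ones((batch_size, 1), dtype=tf.int32) * decoder_start_token_id
if tf.reduce_all(user_decoder_input_ids[:, 0] != decoder_start_token_id):
    # user-provided ids do not start with the start token -> prepend it
    seeded = tf.concat([decoder_input_ids_start, user_decoder_input_ids], axis=-1)
else:
    seeded = user_decoder_input_ids
print(seeded.numpy())  # [[0 5 6] [0 7 8]]
# --- end of illustrative aside --------------------------------------------------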
decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id) decoder_input_ids_start = tf.ones((batch_size, 1), dtype=tf.int32) * decoder_start_token_id # no user input -> use decoder_start_token_id as decoder_input_ids if decoder_input_ids is None: decoder_input_ids = decoder_input_ids_start # user input but doesn't start with decoder_start_token_id -> prepend decoder_start_token_id (and adjust # decoder_attention_mask if provided) elif tf.reduce_all(decoder_input_ids[:, 0] != decoder_start_token_id): decoder_input_ids = tf.concat([decoder_input_ids_start, decoder_input_ids], axis=-1) if "decoder_attention_mask" in model_kwargs: decoder_attention_mask = model_kwargs["decoder_attention_mask"] decoder_attention_mask = tf.concat( (tf.ones_like(decoder_attention_mask)[:, :1], decoder_attention_mask), axis=-1, ) model_kwargs["decoder_attention_mask"] = decoder_attention_mask return decoder_input_ids, model_kwargs def _get_decoder_start_token_id(self, decoder_start_token_id: int = None, bos_token_id: int = None) -> int: # retrieve decoder_start_token_id for encoder-decoder models # fall back to bos_token_id if necessary decoder_start_token_id = ( decoder_start_token_id if decoder_start_token_id is not None else self.generation_config.decoder_start_token_id ) bos_token_id = bos_token_id if bos_token_id is not None else self.generation_config.bos_token_id if decoder_start_token_id is not None: return decoder_start_token_id elif bos_token_id is not None: return bos_token_id raise ValueError( "`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation." ) @staticmethod def _expand_inputs_for_generation( expand_size: int = 1, is_encoder_decoder: bool = False, input_ids: Optional[tf.Tensor] = None, expand_in_new_axis: bool = False, **model_kwargs, ) -> Tuple[tf.Tensor, Dict[str, Any]]: """ Expands tensors from [batch_size, ...] to [batch_size * expand_size, ...] or [batch_size, expand_size, ...], depending on `expand_in_new_axis`. Beam-based approaches expect this function to be used with `expand_in_new_axis=True` """ def _expand_tensor(tensor: tf.Tensor): if expand_in_new_axis: shape = shape_list(tensor) return tf.broadcast_to(tensor[:, None], (shape[0], expand_size) + tuple(shape[1:])) else: return tf.repeat(tensor, expand_size, axis=0) def _expand_dict_for_generation(dict_to_expand): for key in dict_to_expand: if dict_to_expand[key] is not None and isinstance(dict_to_expand[key], tf.Tensor): dict_to_expand[key] = _expand_tensor(dict_to_expand[key]) return dict_to_expand if input_ids is not None: input_ids = _expand_tensor(input_ids) model_kwargs = _expand_dict_for_generation(model_kwargs) if is_encoder_decoder: if model_kwargs.get("encoder_outputs") is None: raise ValueError("If `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined.") model_kwargs["encoder_outputs"] = _expand_dict_for_generation(model_kwargs["encoder_outputs"]) return input_ids, model_kwargs def _prepare_model_inputs( self, inputs: Optional[tf.Tensor] = None, bos_token_id: Optional[int] = None, model_kwargs: Optional[Dict[str, tf.Tensor]] = None, ) -> Tuple[tf.Tensor, Optional[str], Dict[str, tf.Tensor]]: """ This function extracts the model-specific `inputs` for generation. """ # 1. retrieve all kwargs that are non-None or non-model input related. 
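# --- Illustrative aside (added for clarity; not part of the original module) ---
# A standalone sketch of how the `_expand_inputs_for_generation` helper defined
# above duplicates a batch, using a toy tensor. With the flat expansion the rows
# are repeated along the batch axis (used for `num_return_sequences` in
# sampling); with `expand_in_new_axis=True` a beam axis is broadcast in (used by
# beam search). Sizes here are made up for illustration.
import tensorflow as tf

input_ids = tf.constant([[1, 2, 3], [4, 5, 6]], dtype=tf.int32)  # [batch=2, seq=3]
expand_size = 2

# flat expansion: [batch, seq] -> [batch * expand_size, seq]
repeated = tf.repeat(input_ids, expand_size, axis=0)
print(repeated.shape)  # (4, 3)

# beam-style expansion: [batch, seq] -> [batch, expand_size, seq]
shape = input_ids.shape.as_list()
broadcast = tf.broadcast_to(input_ids[:, None], (shape[0], expand_size) + tuple(shape[1:]))
print(broadcast.shape)  # (2, 2, 3)
# --- end of illustrative aside --------------------------------------------------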
# some encoder-decoder models have different names for model and encoder if ( self.config.is_encoder_decoder and hasattr(self, "encoder") and hasattr(self.encoder, "main_input_name") and self.encoder.main_input_name != self.main_input_name ): input_name = self.encoder.main_input_name else: input_name = self.main_input_name model_kwargs = {k: v for k, v in model_kwargs.items() if v is not None or k != input_name} # 2. check whether model_input_name is passed as kwarg # if yes and `inputs` is None use kwarg inputs inputs_kwarg = model_kwargs.pop(input_name, None) if inputs_kwarg is not None and inputs is not None: raise ValueError( f"`inputs`: {inputs}` were passed alongside {input_name} which is not allowed. " f"Make sure to either pass {inputs} or {input_name}=..." ) elif inputs_kwarg is not None: inputs = inputs_kwarg # 3. In the presence of `inputs_embeds` for text models: # - decoder-only models should complain if the user attempts to pass `inputs_embeds`, but the model # doesn't have its forwarding implemented. `inputs_embeds` is kept in `model_kwargs` and can coexist with # input_ids (`inputs_embeds` will be used in the 1st generation step, as opposed to `input_ids`) # - encoder-decoder models should complain if the user attempts to pass `inputs_embeds` and `input_ids`, and # pull the former to inputs. It will be used in place of `input_ids` to get the encoder hidden states. if input_name == "input_ids" and "inputs_embeds" in model_kwargs: if not self.config.is_encoder_decoder: has_inputs_embeds_forwarding = "inputs_embeds" in set( inspect.signature(self.prepare_inputs_for_generation).parameters.keys() ) if not has_inputs_embeds_forwarding: raise ValueError( f"You passed `inputs_embeds` to `.generate()`, but the model class {self.__class__.__name__} " "doesn't have its forwarding implemented. See the GPT2 implementation for an example " "(https://github.com/huggingface/transformers/pull/21405), and feel free to open a PR with it!" ) # In this case, `input_ids` is moved to the `model_kwargs`, so a few automations (like the creation of # the attention mask) can rely on the actual model input. model_kwargs["input_ids"] = self._maybe_initialize_input_ids_for_generation( inputs, bos_token_id, model_kwargs=model_kwargs ) else: if inputs is not None: raise ValueError("You passed `inputs_embeds` and `input_ids` to `.generate()`. Please pick one.") inputs, input_name = model_kwargs["inputs_embeds"], "inputs_embeds" # 4. if `inputs` is still None, try to create `input_ids` from BOS token inputs = self._maybe_initialize_input_ids_for_generation(inputs, bos_token_id, model_kwargs) return inputs, input_name, model_kwargs def _maybe_initialize_input_ids_for_generation( self, inputs: Optional[tf.Tensor] = None, bos_token_id: Optional[int] = None, model_kwargs: Optional[Dict[str, tf.Tensor]] = None, ) -> tf.Tensor: """Initializes input ids for generation, if necessary.""" if inputs is not None: return inputs encoder_outputs = model_kwargs.get("encoder_outputs") if self.config.is_encoder_decoder and encoder_outputs is not None: # make dummy input_ids with value -100, as a sanity check ensuring that they won't be used for encoding shape = encoder_outputs.last_hidden_state.shape[:-1] return tf.ones(shape, dtype=tf.int32) * -100 if bos_token_id is None: raise ValueError("`bos_token_id` has to be defined when no `input_ids` are provided.") # If there is some tensor in `model_kwargs`, we can infer the batch size from it. 
This is helpful with # soft-prompting or in multimodal implementations built on top of decoder-only language models. batch_size = 1 for value in model_kwargs.values(): if isinstance(value, tf.Tensor): batch_size = value.shape[0] break return tf.ones((batch_size, 1), dtype=tf.int32) * bos_token_id @staticmethod def _extract_past_from_model_output(outputs: ModelOutput): past_key_values = None if "past_key_values" in outputs: past_key_values = outputs.past_key_values elif "mems" in outputs: past_key_values = outputs.mems elif "past_buckets_states" in outputs: past_key_values = outputs.past_buckets_states return past_key_values def _update_model_kwargs_for_generation( self, outputs: ModelOutput, model_kwargs: Dict[str, Any], is_encoder_decoder: bool = False ) -> Dict[str, Any]: # update past_key_values model_kwargs["past_key_values"] = self._extract_past_from_model_output(outputs) # update attention mask if not is_encoder_decoder: if "attention_mask" in model_kwargs: attention_mask = model_kwargs["attention_mask"] model_kwargs["attention_mask"] = tf.concat( [attention_mask, tf.ones((shape_list(attention_mask)[0], 1), dtype=tf.int32)], axis=-1 ) return model_kwargs def _update_model_kwargs_for_xla_generation( self, model_outputs: ModelOutput, model_kwargs: Dict[str, Any], cur_len: int, max_length: int, batch_size: int, is_encoder_decoder: bool = False, batch_axis: int = 0, ): def _initialize_attention(model_kwargs, num_padding_values, is_encoder_decoder): """initializes the appropriate attention mask -- encoder-decoder models use `decoder_attention_mask`""" if is_encoder_decoder: # One 1 for decoder_start_token_id, 0s for the currently-unfilled locations in the past_key_values tensor, # 1s for the actual input_ids decoder_attention_mask = tf.concat( [ tf.ones((batch_size, 1), dtype=tf.int32), tf.zeros((batch_size, num_padding_values), dtype=tf.int32), tf.ones((batch_size, 1), dtype=tf.int32), ], axis=1, ) mask = {"decoder_attention_mask": decoder_attention_mask} else: attention_mask = model_kwargs.pop("attention_mask") # 0s for the currently-unfilled locations in the past_key_values tensor, 1s for the actual input_ids attention_mask = tf.concat( [ attention_mask, tf.zeros((batch_size, num_padding_values), dtype=attention_mask.dtype), tf.ones((batch_size, 1), dtype=attention_mask.dtype), ], axis=1, ) mask = {"attention_mask": attention_mask} return mask def _update_attention(model_kwargs, new_past_index, is_encoder_decoder): """updates the appropriate attention mask -- encoder-decoder models use `decoder_attention_mask`""" update_start = tf.constant([0, 1], dtype=tf.int32) * new_past_index if is_encoder_decoder: decoder_attention_mask = model_kwargs.pop("decoder_attention_mask") decoder_attention_mask_update_slice = tf.ones((batch_size, 1), dtype=decoder_attention_mask.dtype) decoder_attention_mask = dynamic_update_slice( decoder_attention_mask, decoder_attention_mask_update_slice, update_start ) mask = {"decoder_attention_mask": decoder_attention_mask} else: attention_mask = model_kwargs.pop("attention_mask") attention_mask_update_slice = tf.ones((batch_size, 1), dtype=attention_mask.dtype) attention_mask = dynamic_update_slice(attention_mask, attention_mask_update_slice, update_start) mask = {"attention_mask": attention_mask} return mask def _initialize_past(past_key_values, num_padding_values, batch_axis): """initialize past_key_values with zeros -- the structure depends on `batch_axis`""" if batch_axis == 0: padding_values = tf.constant([[0, 0], [0, 0], [0, num_padding_values], [0, 0]], 
dtype=tf.int32) new_past = () for past_layer in past_key_values: new_past_layer = list(past_layer) for i in range(len(new_past_layer[:2])): new_past_layer[i] = tf.pad(past_layer[i], padding_values) new_past += (tuple(new_past_layer),) else: padding_values = tf.scatter_nd(indices=[[3, 1]], updates=[num_padding_values], shape=(5, 2)) new_past = list(past_key_values) for i in range(len(past_key_values)): new_past[i] = tf.pad(past_key_values[i], padding_values) return new_past def _update_past(past_key_values, new_past_index, batch_axis): if batch_axis == 0: slice_start_base = tf.constant([0, 0, 1, 0]) new_past = () for past_layer in past_key_values: new_past_layer = list(past_layer) for i in range(len(new_past_layer[:2])): update_slice = past_layer[i][:, :, -1:] # Write the last slice to the first open location in the padded past_key_values array # and then truncate the last slice off the array new_past_layer[i] = dynamic_update_slice( past_layer[i][:, :, :-1], update_slice, slice_start_base * new_past_index ) new_past += (tuple(new_past_layer),) else: slice_start_base = tf.constant([0, 0, 0, 1, 0]) new_past = [None for _ in range(len(past_key_values))] for i in range(len(past_key_values)): update_slice = past_key_values[i][:, :, :, -1:] # Write the last slice to the first open location in the padded past_key_values array # and then truncate the last slice off the array new_past[i] = dynamic_update_slice( past_key_values[i][:, :, :, :-1], update_slice, slice_start_base * new_past_index ) return new_past past_key_values = self._extract_past_from_model_output(model_outputs) if past_key_values is None: raise ValueError( "No known `past_key_values variable` found in model outputs (model outputs keys:" f" {list(model_outputs.keys())})" ) is_past_initialized = model_kwargs.pop("past_key_values", None) is not None if not is_past_initialized: # The padded version of `past_key_values` has a length of `max_length - 1`, as `past_key_values` holds information relative to # previous autoregressive generation steps (step 0 has no past_key_values, step 1 has 1 past_key_values value, ..., the last step # has `max_length - 1` past_key_values values). num_padding_values = max_length - cur_len - 1 mask = _initialize_attention(model_kwargs, num_padding_values, is_encoder_decoder) new_past = _initialize_past(past_key_values, num_padding_values, batch_axis) else: # The new index of past_key_values to be filled corresponds to the current length of the sequence, with two # subtractions: -1 because past_key_values holds information regarding previous generation steps (read comment above) # and -1 again because in an array the index is the length of the array minus 1. new_past_index = cur_len - 2 mask = _update_attention(model_kwargs, new_past_index, is_encoder_decoder) new_past = _update_past(past_key_values, new_past_index, batch_axis) # sets the updated variables (mask and past_key_values) model_kwargs.update(mask) model_kwargs["past_key_values"] = tuple(new_past) return model_kwargs def _get_logits_warper( self, generation_config: GenerationConfig, ) -> TFLogitsProcessorList: """ This class returns a [`TFLogitsProcessorList`] list object that contains all relevant [`TFLogitsWarper`] instances used for multinomial sampling. """ # instantiate warpers list warpers = TFLogitsProcessorList() # In beam methods, we need to keep at least one non-eos token to explore continuations that might have a # better score (i.e. 
keep len(generation_config.eos_token_id) + 1) if generation_config.num_beams > 1: if isinstance(generation_config.eos_token_id, list): min_tokens_to_keep = len(generation_config.eos_token_id) + 1 else: min_tokens_to_keep = 2 else: min_tokens_to_keep = 1 if generation_config.temperature is not None and generation_config.temperature != 1.0: warpers.append(TFTemperatureLogitsWarper(generation_config.temperature)) if generation_config.top_k is not None and generation_config.top_k != 0: warpers.append(TFTopKLogitsWarper(top_k=generation_config.top_k, min_tokens_to_keep=min_tokens_to_keep)) if generation_config.top_p is not None and generation_config.top_p < 1.0: warpers.append(TFTopPLogitsWarper(top_p=generation_config.top_p, min_tokens_to_keep=min_tokens_to_keep)) return warpers def _get_logits_processor( self, generation_config: GenerationConfig, input_ids_seq_length: int, logits_processor: Optional[TFLogitsProcessorList], ) -> TFLogitsProcessorList: """ This class returns a [`TFLogitsProcessorList`] list object that contains all relevant [`TFLogitsProcessor`] instances used to modify the scores of the language model head. """ processors = TFLogitsProcessorList() # instantiate processors list if generation_config.repetition_penalty is not None and generation_config.repetition_penalty != 1.0: processors.append(TFRepetitionPenaltyLogitsProcessor(penalty=generation_config.repetition_penalty)) if generation_config.no_repeat_ngram_size is not None and generation_config.no_repeat_ngram_size > 0: processors.append(TFNoRepeatNGramLogitsProcessor(generation_config.no_repeat_ngram_size)) if generation_config.bad_words_ids is not None: processors.append( TFNoBadWordsLogitsProcessor(generation_config.bad_words_ids, generation_config.eos_token_id) ) if ( generation_config.min_length is not None and generation_config.eos_token_id is not None and generation_config.min_length > 0 ): processors.append(TFMinLengthLogitsProcessor(generation_config.min_length, generation_config.eos_token_id)) if generation_config.forced_bos_token_id is not None: processors.append(TFForcedBOSTokenLogitsProcessor(generation_config.forced_bos_token_id)) if generation_config.forced_eos_token_id is not None: processors.append( TFForcedEOSTokenLogitsProcessor(generation_config.max_length, generation_config.forced_eos_token_id) ) if generation_config.suppress_tokens is not None: processors.append(TFSuppressTokensLogitsProcessor(generation_config.suppress_tokens)) if generation_config.begin_suppress_tokens is not None: begin_index = input_ids_seq_length begin_index = ( begin_index if (input_ids_seq_length > 1 or generation_config.forced_bos_token_id is None) else begin_index + 1 ) if generation_config.forced_decoder_ids is not None: begin_index += generation_config.forced_decoder_ids[-1][ 0 ] # generation starts after the last token that is forced processors.append( TFSuppressTokensAtBeginLogitsProcessor(generation_config.begin_suppress_tokens, begin_index) ) if generation_config.forced_decoder_ids is not None: processors.append(TFForceTokensLogitsProcessor(generation_config.forced_decoder_ids)) processors = self._merge_criteria_processor_list(processors, logits_processor) return processors def _merge_criteria_processor_list( self, default_list: TFLogitsProcessorList, custom_list: TFLogitsProcessorList, ) -> TFLogitsProcessorList: if len(custom_list) == 0: return default_list for default in default_list: for custom in custom_list: if type(custom) is type(default): object_type = "logits processor" raise ValueError( f"A custom 
{object_type} of type {type(custom)} with values {custom} has been passed to" f" `generate`, but it has already been created with the values {default}. {default} has been" " created by passing the corresponding arguments to generate or by the model's config default" f" values. If you just want to change the default values of {object_type} consider passing" f" them as arguments to `generate` instead of using a custom {object_type}." ) default_list.extend(custom_list) return default_list def greedy_search( self, input_ids: tf.Tensor, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, logits_processor: Optional[TFLogitsProcessorList] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, **model_kwargs, ) -> Union[TFGreedySearchOutput, tf.Tensor]: r""" Generates sequences for models with a language modeling head using greedy decoding. Parameters: input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. logits_processor (`TFLogitsProcessorList`, *optional*): An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. max_length (`int`, *optional*, defaults to 20): The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`Union[int, List[int]]`, *optional*): The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to `False`): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to `False`): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. model_kwargs: Additional model specific keyword arguments will be forwarded to the `call` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. Return: [`~generation.TFGreedySearchDecoderOnlyOutput`], [`~generation.TFGreedySearchEncoderDecoderOutput`] or `tf.Tensor`: A `tf.Tensor` containing the generated tokens (default behaviour) or a [`~generation.TFGreedySearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation.TFGreedySearchEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. Examples: ```python >>> from transformers import ( ... AutoTokenizer, ... TFAutoModelForCausalLM, ... TFLogitsProcessorList, ... TFMinLengthLogitsProcessor, ... 
) >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2") >>> model = TFAutoModelForCausalLM.from_pretrained("openai-community/gpt2") >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token >>> model.generation_config.pad_token_id = model.generation_config.eos_token_id >>> input_prompt = "Today is a beautiful day, and" >>> input_ids = tokenizer(input_prompt, return_tensors="tf").input_ids >>> # instantiate logits processors >>> logits_processor = TFLogitsProcessorList( ... [ ... TFMinLengthLogitsProcessor(15, eos_token_id=model.generation_config.eos_token_id), ... ] ... ) >>> outputs = model.greedy_search(input_ids, logits_processor=logits_processor) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ["Today is a beautiful day, and I'm so happy to be here. I'm so happy to"] ```""" # 1. init greedy_search values logits_processor = logits_processor if logits_processor is not None else TFLogitsProcessorList() max_length = max_length if max_length is not None else self.generation_config.max_length pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id if isinstance(eos_token_id, int): eos_token_id = [eos_token_id] output_scores = output_scores if output_scores is not None else self.generation_config.output_scores output_attentions = ( output_attentions if output_attentions is not None else self.generation_config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.generation_config.return_dict_in_generate ) use_cache = model_kwargs.pop("use_cache", self.generation_config.use_cache) use_xla = not tf.executing_eagerly() # TODO (Joao): fix cache format or find programatic way to detect cache index # GPT2 and other models has a slightly different cache structure, with a different batch axis model_name = str(self.decoder) if "EncoderDecoder" in str(self) else str(self) cache_batch_axis = 1 if any(model_prefix in model_name for model_prefix in ("TFGPT2", "TFCTRL")) else 0 # some models, like XLNet, need more than the last token in the presence of past_key_values needs_full_input = "use_mems" in set(inspect.signature(self.prepare_inputs_for_generation).parameters.keys()) # 2. init `attentions`, `hidden_states`, and `scores` tuples scores = [] if (return_dict_in_generate and output_scores) else None decoder_attentions = [] if (return_dict_in_generate and output_attentions) else None cross_attentions = [] if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = [] if (return_dict_in_generate and output_hidden_states) else None # 3. init tensors to use for "xla-compileable" generate function batch_size, cur_len = shape_list(input_ids) # initialize `generated` (`input_ids` padded with `pad_token_id`), `finished_sequences` input_ids_padding = tf.ones((batch_size, max_length - cur_len), dtype=tf.int32) * (pad_token_id or 0) generated = tf.concat([input_ids, input_ids_padding], axis=-1) finished_sequences = tf.zeros((batch_size,), dtype=tf.bool) # 4. 
define "xla-compile-able" stop-condition and auto-regressive function # define condition fn def greedy_search_cond_fn(generated, finished_sequences, cur_len, model_kwargs): """state termination condition fn.""" return ~tf.reduce_all(finished_sequences) # define condition fn def greedy_search_body_fn(generated, finished_sequences, cur_len, model_kwargs): """state update fn.""" if model_kwargs.get("past_key_values") is None or needs_full_input: input_ids = generated[:, :cur_len] else: input_ids = tf.expand_dims(generated[:, cur_len - 1], -1) model_inputs = self.prepare_inputs_for_generation(input_ids, use_cache=use_cache, **model_kwargs) # forward pass to get next token logits model_outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) next_token_logits = model_outputs.logits[:, -1] # pre-process distribution next_tokens_scores = logits_processor(generated, next_token_logits, cur_len) # Store scores, attentions and hidden_states when required if not use_xla and return_dict_in_generate: if output_scores: scores.append(next_tokens_scores) if output_attentions and self.config.is_encoder_decoder: decoder_attentions.append(model_outputs.decoder_attentions) elif output_attentions and not self.config.is_encoder_decoder: decoder_attentions.append(model_outputs.attentions) if self.config.is_encoder_decoder: cross_attentions.append(model_outputs.cross_attentions) if output_hidden_states and self.config.is_encoder_decoder: decoder_hidden_states.append(model_outputs.decoder_hidden_states) elif output_hidden_states and self.config.is_encoder_decoder: decoder_hidden_states.append(model_outputs.hidden_states) # argmax next_tokens = tf.argmax(next_tokens_scores, axis=-1, output_type=tf.int32) if eos_token_id is not None: if pad_token_id is None: raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.") unfinished_seq = 1 - tf.cast(finished_sequences, tf.int32) next_tokens = next_tokens * unfinished_seq + pad_token_id * (1 - unfinished_seq) next_token_is_eos = tf.math.reduce_any( tf.equal( tf.broadcast_to(next_tokens, (len(eos_token_id), batch_size)), tf.expand_dims(eos_token_id, -1) ), axis=0, ) finished_sequences = finished_sequences | next_token_is_eos # update `generated` and `cur_len` update_indices = tf.stack([tf.range(batch_size), tf.broadcast_to(cur_len, [batch_size])], axis=-1) generated = tf.tensor_scatter_nd_update(tensor=generated, indices=update_indices, updates=next_tokens) cur_len += 1 # update model_kwargs if use_xla: model_kwargs = self._update_model_kwargs_for_xla_generation( model_outputs=model_outputs, model_kwargs=model_kwargs, cur_len=cur_len, max_length=max_length, batch_size=batch_size, is_encoder_decoder=self.config.is_encoder_decoder, batch_axis=cache_batch_axis, ) else: model_kwargs = self._update_model_kwargs_for_generation( model_outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) # if we don't cache past_key_values key values we need the whole input if model_kwargs.get("past_key_values", None) is None: # let's throw out `past_key_values` since we don't want `None` tensors model_kwargs.pop("past_key_values", None) return generated, finished_sequences, cur_len, model_kwargs # 5. 
run generation # 1st generation step has to be run before to initialize `past_key_values` generated, finished_sequences, cur_len, model_kwargs = greedy_search_body_fn( generated, finished_sequences, cur_len, model_kwargs ) # 2-to-n generation steps can then be run in autoregressive fashion # only in case 1st generation step does NOT yield EOS token though maximum_iterations = max_length - cur_len generated, _, cur_len, _ = tf.while_loop( greedy_search_cond_fn, greedy_search_body_fn, (generated, finished_sequences, cur_len, model_kwargs), maximum_iterations=maximum_iterations, ) # 6. prepare outputs if not use_xla: # cut for backward compatibility generated = generated[:, :cur_len] if return_dict_in_generate: if self.config.is_encoder_decoder: # if model is an encoder-decoder, retrieve encoder attention weights # and hidden states encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) scores = tuple(scores) if scores is not None else None decoder_attentions = tuple(decoder_attentions) if decoder_attentions is not None else None cross_attentions = tuple(cross_attentions) if cross_attentions is not None else None decoder_hidden_states = tuple(decoder_hidden_states) if decoder_hidden_states is not None else None return TFGreedySearchEncoderDecoderOutput( sequences=generated, scores=scores, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return TFGreedySearchDecoderOnlyOutput( sequences=generated, scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return generated def sample( self, input_ids: tf.Tensor, logits_processor: Optional[TFLogitsProcessorList] = None, logits_warper: Optional[TFLogitsProcessorList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, seed: Optional[Tuple[int, int]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, **model_kwargs, ) -> Union[TFSampleOutput, tf.Tensor]: r""" Generates sequences for models with a language modeling head using multinomial sampling. Parameters: input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. logits_processor (`TFLogitsProcessorList`, *optional*): An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. logits_warper (`TFLogitsProcessorList`, *optional*): An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsWarper`] used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step. max_length (`int`, *optional*, defaults to 20): The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`Union[int, List[int]]`, *optional*): The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. 
seed (`List[int]`, *optional*): Random seed to control sampling, containing two integers, used when `do_sample` is `True`. See the `seed` argument from stateless functions in `tf.random`. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to `False`): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to `False`): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. model_kwargs: Additional model specific kwargs will be forwarded to the `call` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. Return: [`~generation.TFSampleDecoderOnlyOutput`], [`~generation.TFSampleEncoderDecoderOutput`] or `tf.Tensor`: A `tf.Tensor` containing the generated tokens (default behaviour) or a [`~generation.TFSampleDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation.TFSampleEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. Examples: ```python >>> import tensorflow as tf >>> from transformers import ( ... AutoTokenizer, ... TFAutoModelForCausalLM, ... TFLogitsProcessorList, ... TFMinLengthLogitsProcessor, ... TFTopKLogitsWarper, ... TFTemperatureLogitsWarper, ... ) >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2") >>> model = TFAutoModelForCausalLM.from_pretrained("openai-community/gpt2") >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token >>> model.generation_config.pad_token_id = model.generation_config.eos_token_id >>> input_prompt = "Today is a beautiful day, and" >>> input_ids = tokenizer(input_prompt, return_tensors="tf").input_ids >>> # instantiate logits processors >>> logits_processor = TFLogitsProcessorList( ... [ ... TFMinLengthLogitsProcessor(15, eos_token_id=model.generation_config.eos_token_id), ... ] ... ) >>> # instantiate logits warpers >>> logits_warper = TFLogitsProcessorList( ... [ ... TFTopKLogitsWarper(50), ... TFTemperatureLogitsWarper(0.7), ... ] ... ) >>> tf.random.set_seed(0) >>> outputs = model.sample(input_ids, logits_processor=logits_processor, logits_warper=logits_warper) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Today is a beautiful day, and I love my country. But when I look at Donald Trump,'] ```""" # 1. 
init greedy_search values logits_processor = logits_processor if logits_processor is not None else TFLogitsProcessorList() logits_warper = logits_warper if logits_warper is not None else TFLogitsProcessorList() max_length = max_length if max_length is not None else self.generation_config.max_length pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id if isinstance(eos_token_id, int): eos_token_id = [eos_token_id] output_scores = output_scores if output_scores is not None else self.generation_config.output_scores output_attentions = ( output_attentions if output_attentions is not None else self.generation_config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.generation_config.return_dict_in_generate ) use_cache = model_kwargs.pop("use_cache", self.generation_config.use_cache) use_xla = not tf.executing_eagerly() # TODO (Joao): fix cache format or find programatic way to detect cache index # GPT2 and other models has a slightly different cache structure, with a different batch axis model_name = str(self.decoder) if "EncoderDecoder" in str(self) else str(self) cache_batch_axis = 1 if any(model_prefix in model_name for model_prefix in ("TFGPT2", "TFCTRL")) else 0 # some models, like XLNet, need more than the last token in the presence of past_key_values needs_full_input = "use_mems" in set(inspect.signature(self.prepare_inputs_for_generation).parameters.keys()) # 2. init `attentions`, `hidden_states`, and `scores` tuples scores = [] if (return_dict_in_generate and output_scores) else None decoder_attentions = [] if (return_dict_in_generate and output_attentions) else None cross_attentions = [] if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = [] if (return_dict_in_generate and output_hidden_states) else None # 3. init tensors to use for "xla-compileable" generate function batch_size, cur_len = shape_list(input_ids) # initialize `generated` (pre-populated with `pad_token_id`), `finished_sequences` input_ids_padding = tf.ones((batch_size, max_length - cur_len), dtype=tf.int32) * (pad_token_id or 0) generated = tf.concat([input_ids, input_ids_padding], axis=-1) finished_sequences = tf.zeros((batch_size,), dtype=tf.bool) # 4. 
define "xla-compile-able" stop-condition and auto-regressive function def sample_cond_fn(generated, finished_sequences, cur_len, model_kwargs): return ~tf.reduce_all(finished_sequences) def sample_body_fn(generated, finished_sequences, cur_len, model_kwargs): if model_kwargs.get("past_key_values") is None or needs_full_input: input_ids = generated[:, :cur_len] else: input_ids = tf.expand_dims(generated[:, cur_len - 1], -1) model_inputs = self.prepare_inputs_for_generation(input_ids, use_cache=use_cache, **model_kwargs) # forward pass to get next token logits model_outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) next_token_logits = model_outputs.logits[:, -1] # pre-process distribution next_tokens_scores = logits_processor(generated, next_token_logits, cur_len) next_tokens_scores = logits_warper(generated, next_tokens_scores, cur_len) # Store scores, attentions and hidden_states when required if not use_xla and return_dict_in_generate: if output_scores: scores.append(next_tokens_scores) if output_attentions and self.config.is_encoder_decoder: decoder_attentions.append(model_outputs.decoder_attentions) elif output_attentions and not self.config.is_encoder_decoder: decoder_attentions.append(model_outputs.attentions) if self.config.is_encoder_decoder: cross_attentions.append(model_outputs.cross_attentions) if output_hidden_states and self.config.is_encoder_decoder: decoder_hidden_states.append(model_outputs.decoder_hidden_states) elif output_hidden_states and self.config.is_encoder_decoder: decoder_hidden_states.append(model_outputs.hidden_states) # sample if seed is not None: sample_seed = seed else: sample_seed = tf.experimental.numpy.random.randint(tf.int32.min, tf.int32.max, (2,), dtype=tf.int32) next_tokens = tf.squeeze( tf.random.stateless_categorical( logits=next_tokens_scores, num_samples=1, seed=sample_seed, dtype=tf.int32 ), axis=1, ) if eos_token_id is not None: if pad_token_id is None: raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.") unfinished_seq = 1 - tf.cast(finished_sequences, tf.int32) next_tokens = next_tokens * unfinished_seq + pad_token_id * (1 - unfinished_seq) next_token_is_eos = tf.math.reduce_any( tf.equal( tf.broadcast_to(next_tokens, (len(eos_token_id), batch_size)), tf.expand_dims(eos_token_id, -1) ), axis=0, ) finished_sequences = finished_sequences | next_token_is_eos # update `generated` and `cur_len` update_indices = tf.stack([tf.range(batch_size), tf.broadcast_to(cur_len, [batch_size])], axis=-1) generated = tf.tensor_scatter_nd_update(tensor=generated, indices=update_indices, updates=next_tokens) cur_len += 1 # update model_kwargs if use_xla: model_kwargs = self._update_model_kwargs_for_xla_generation( model_outputs=model_outputs, model_kwargs=model_kwargs, cur_len=cur_len, max_length=max_length, batch_size=batch_size, is_encoder_decoder=self.config.is_encoder_decoder, batch_axis=cache_batch_axis, ) else: model_kwargs = self._update_model_kwargs_for_generation( model_outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) # if we don't cache past_key_values key values we need the whole input if model_kwargs.get("past_key_values", None) is None: # let's throw out `past_key_values` since we don't want `None` tensors model_kwargs.pop("past_key_values", None) return generated, finished_sequences, cur_len, model_kwargs # 5. 
run generation # 1st generation step has to be run before to initialize `past_key_values` generated, finished_sequences, cur_len, model_kwargs = sample_body_fn( generated, finished_sequences, cur_len, model_kwargs ) # 2-to-n generation steps can then be run in autoregressive fashion # only in case 1st generation step does NOT yield EOS token though maximum_iterations = max_length - cur_len generated, _, cur_len, _ = tf.while_loop( sample_cond_fn, sample_body_fn, (generated, finished_sequences, cur_len, model_kwargs), maximum_iterations=maximum_iterations, ) # 6. prepare outputs if not use_xla: # cut for backward compatibility generated = generated[:, :cur_len] if return_dict_in_generate: if self.config.is_encoder_decoder: # if model is an encoder-decoder, retrieve encoder attention weights # and hidden states encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) scores = tuple(scores) if scores is not None else None decoder_attentions = tuple(decoder_attentions) if decoder_attentions is not None else None cross_attentions = tuple(cross_attentions) if cross_attentions is not None else None decoder_hidden_states = tuple(decoder_hidden_states) if decoder_hidden_states is not None else None return TFSampleEncoderDecoderOutput( sequences=generated, scores=scores, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return TFSampleDecoderOnlyOutput( sequences=generated, scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return generated @staticmethod def _gather_beams(nested, beam_indices, batch_axis=0): """Gathers the beam slices indexed by beam_indices into new beam array.""" def gather_fn(tensor): if batch_axis > 0: # pushes all dimentions before the batch to the end, so we get (batch, beam_id, ...) perm = tf.concat((tf.range(tf.rank(tensor))[batch_axis:], tf.range(batch_axis)), axis=0) tensor = tf.transpose(tensor, perm=perm) gathered_tensor = tf.gather(params=tensor, indices=beam_indices, axis=1, batch_dims=1) if batch_axis > 0: # transposes back to the original dimensions perm = tf.concat((tf.range(tf.rank(tensor))[batch_axis:], tf.range(batch_axis)), axis=0) perm = tf.math.invert_permutation(perm) gathered_tensor = tf.transpose(gathered_tensor, perm=perm) return gathered_tensor return tf.nest.map_structure(gather_fn, nested) def beam_search( self, input_ids: tf.Tensor, do_sample: bool = False, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, length_penalty: Optional[float] = None, early_stopping: Optional[Union[bool, str]] = None, logits_processor: Optional[TFLogitsProcessorList] = None, logits_warper: Optional[TFLogitsProcessorList] = None, num_return_sequences: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, **model_kwargs, ) -> Union[TFBeamSearchOutput, TFBeamSampleOutput, tf.Tensor]: r""" Generates sequences for models with a language modeling head using beam search. If `do_sample` is `False`, uses a greedy approach, otherwise does multinomial sampling without replacement. 
Parameters: input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. do_sample (`bool`, *optional*, defaults to `False`): Whether or not to use sampling; use greedy decoding otherwise. max_length (`int`, *optional*, defaults to 20): The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`Union[int, List[int]]`, *optional*): The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. length_penalty (`float`, *optional*, defaults to 1.0): Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while `length_penalty` < 0.0 encourages shorter sequences. early_stopping (`bool` or `str`, *optional*, defaults to `False`): Controls the stopping condition for beam-based methods, like beam-search. It accepts the following values: `True`, where the generation stops as soon as there are `num_beams` complete candidates; `False`, where a heuristic is applied and the generation stops when it is very unlikely to find better candidates; `"never"`, where the beam search procedure only stops when there cannot be better candidates (canonical beam search algorithm). logits_processor (`[TFLogitsProcessorList]`, *optional*): An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. logits_warper (`TFLogitsProcessorList`, *optional*): An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsWarper`] used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step. num_return_sequences (`int`, *optional*, defaults to 1): The number of independently computed returned sequences for each element in the batch. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to `False`): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to `False`): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. model_kwargs: Additional model specific kwargs will be forwarded to the `call` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. Return: [`~generation.TFBeamSearchDecoderOnlyOutput`], [`~generation.TFBeamSearchEncoderDecoderOutput`] or `tf.Tensor`: A `tf.Tensor` containing the generated tokens (default behaviour) or a [`~generation.TFBeamSearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation.TFBeamSearchEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. Examples: ```python >>> from transformers import ( ... AutoTokenizer, ... TFAutoModelForSeq2SeqLM, ... TFLogitsProcessorList, ... TFMinLengthLogitsProcessor, ... 
) >>> import tensorflow as tf >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base") >>> model = TFAutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-base") >>> encoder_input_str = "translate English to German: How old are you?" >>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="tf").input_ids >>> # lets run beam search using 3 beams >>> num_beams = 3 >>> # define decoder start token ids >>> input_ids = tf.ones((1, num_beams, 1), dtype=tf.int32) >>> input_ids = input_ids * model.generation_config.decoder_start_token_id >>> # add encoder_outputs to model keyword arguments >>> encoder_outputs = model.get_encoder()(encoder_input_ids, return_dict=True) >>> encoder_outputs.last_hidden_state = tf.repeat( ... tf.expand_dims(encoder_outputs.last_hidden_state, axis=0), num_beams, axis=1 ... ) >>> model_kwargs = {"encoder_outputs": encoder_outputs} >>> # instantiate logits processors >>> logits_processor = TFLogitsProcessorList( ... [TFMinLengthLogitsProcessor(5, eos_token_id=model.generation_config.eos_token_id)] ... ) >>> outputs = model.beam_search(input_ids, logits_processor=logits_processor, **model_kwargs) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Wie alt bist du?'] ```""" def flatten_beam_dim(tensor, batch_axis=0): """Flattens the first two dimensions of a non-scalar array.""" shape = shape_list(tensor) return tf.reshape( tensor, shape[:batch_axis] + [shape[batch_axis] * shape[batch_axis + 1]] + shape[batch_axis + 2 :], ) def unflatten_beam_dim(tensor, num_beams, batch_axis=0): """Unflattens the first, flat batch*beam dimension of a non-scalar array.""" shape = shape_list(tensor) return tf.reshape(tensor, shape[:batch_axis] + [-1, num_beams] + shape[batch_axis + 1 :]) # 1. init beam_search values logits_processor = logits_processor if logits_processor is not None else TFLogitsProcessorList() logits_warper = logits_warper if logits_warper is not None else TFLogitsProcessorList() max_length = max_length if max_length is not None else self.generation_config.max_length pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id if isinstance(eos_token_id, int): eos_token_id = [eos_token_id] num_return_sequences = ( num_return_sequences if num_return_sequences is not None else self.generation_config.num_return_sequences ) output_attentions = ( output_attentions if output_attentions is not None else self.generation_config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states ) output_scores = output_scores if output_scores is not None else self.generation_config.output_scores return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.generation_config.return_dict_in_generate ) length_penalty = length_penalty if length_penalty is not None else self.generation_config.length_penalty early_stopping = early_stopping if early_stopping is not None else self.generation_config.early_stopping use_cache = model_kwargs.pop("use_cache", self.generation_config.use_cache) use_xla = not tf.executing_eagerly() # TODO (Joao): fix cache format or find programatic way to detect cache index # GPT2 and other models has a slightly different cache structure, with a different batch axis model_name = str(self.decoder) if "EncoderDecoder" in str(self) else str(self) cache_batch_axis = 1 if 
any(model_prefix in model_name for model_prefix in ("TFGPT2", "TFCTRL")) else 0 # some models, like XLNet, need more than the last token in the presence of past_key_values needs_full_input = "use_mems" in set(inspect.signature(self.prepare_inputs_for_generation).parameters.keys()) # 2. init `attentions`, `hidden_states`, and `scores` tuples all_scores = [] if (return_dict_in_generate and output_scores) else None decoder_attentions = [] if (return_dict_in_generate and output_attentions) else None cross_attentions = [] if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = [] if (return_dict_in_generate and output_hidden_states) else None # 3. init tensors to use for "xla-compileable" generate function batch_size, num_beams, cur_len = shape_list(input_ids) # store the prompt length of decoder decoder_prompt_len = cur_len # per batch, beam-item holding current token in loop, pre-populated with `pad_token_id` input_ids_padding = tf.ones((batch_size, num_beams, max_length - cur_len), dtype=tf.int32) * ( pad_token_id or 0 ) running_sequences = tf.concat([input_ids, input_ids_padding], axis=-1) sequences = tf.ones((batch_size, num_beams, max_length), dtype=tf.int32) * (pad_token_id or 0) # per batch,beam-item state bit indicating if sentence has finished. is_sent_finished = tf.zeros((batch_size, num_beams), dtype=tf.bool) # per batch, beam-item score, logprobs running_scores = tf.tile( tf.expand_dims(tf.convert_to_tensor([0.0] + [-1.0e9] * (num_beams - 1)), axis=0), [batch_size, 1] ) scores = tf.ones((batch_size, num_beams)) * -1.0e9 # per batch beam indices running_beam_indices = tf.ones((batch_size, num_beams, max_length - decoder_prompt_len), dtype=tf.int32) * -1 beam_indices = tf.ones((batch_size, num_beams, max_length - decoder_prompt_len), dtype=tf.int32) * -1 # flatten beam dim if "encoder_outputs" in model_kwargs: model_kwargs["encoder_outputs"]["last_hidden_state"] = flatten_beam_dim( model_kwargs["encoder_outputs"]["last_hidden_state"] ) if "attention_mask" in model_kwargs: model_kwargs["attention_mask"] = flatten_beam_dim(model_kwargs["attention_mask"]) # 4. define "xla-compile-able" stop-condition and auto-regressive function # define stop-condition and auto-regressive function def beam_search_cond_fn( cur_len, running_sequences, running_scores, running_beam_indices, sequences, scores, beam_indices, is_sent_finished, decoder_prompt_len, model_kwargs, ): """ Beam Search termination condition function -- halts the generation loop if any of these conditions becomes False """ # 1. is less than max length? not_max_length_yet = cur_len < max_length # 2. can the new beams still improve? # early_stopping == False -> apply heuristic = always get the best score from `cur_len - decoder_prompt_len`. See the discussion # below for more details. # https://github.com/huggingface/transformers/pull/20901#issuecomment-1369845565 # early_stopping == "never" -> compute the best score from max_length or cur_len, depending on the sign of # length_penalty. Positive length_penalty favors longer sequences, thus we use max_length there. 
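# --- Illustrative aside (added for clarity; not part of the original module) ---
# A toy numeric example of the length-penalty normalization used by the stopping
# heuristic below: a beam's running score is the sum of its token
# log-probabilities, divided by (generated_length ** length_penalty) before it
# is compared against the worst finished score. All numbers are made up.
import tensorflow as tf

length_penalty = 1.0
best_running_log_probs = tf.constant([[-4.2], [-3.1]])  # best running beam per batch item
generated_length = tf.constant(7.0)  # plays the role of cur_len - decoder_prompt_len

best_running_score = best_running_log_probs / (generated_length**length_penalty)
print(best_running_score.numpy())  # approx. [[-0.60] [-0.44]]
# --- end of illustrative aside --------------------------------------------------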
if early_stopping == "never" and length_penalty > 0.0: best_running_score = running_scores[:, :1] / ((max_length - decoder_prompt_len) ** length_penalty) else: best_running_score = running_scores[:, :1] / ( tf.cast(cur_len - decoder_prompt_len, dtype=tf.float32) ** length_penalty ) worst_finished_score = tf.where( is_sent_finished, tf.math.reduce_min(scores, axis=1, keepdims=True), -1.0e9 ) improvement_still_possible = tf.math.reduce_any(best_running_score > worst_finished_score) # 3. is there still a beam that has not finished? still_open_beam = ~(tf.math.reduce_all(is_sent_finished) & (early_stopping is True)) return not_max_length_yet & still_open_beam & improvement_still_possible def beam_search_body_fn( cur_len, running_sequences, running_scores, running_beam_indices, sequences, scores, beam_indices, is_sent_finished, decoder_prompt_len, model_kwargs, ): """ Beam Search iterative update function -- each iteration adds a new token and updates the best sequences seen so far """ # 1. Forward current tokens if model_kwargs.get("past_key_values") is None or needs_full_input: input_ids = running_sequences[:, :, :cur_len] else: input_ids = tf.expand_dims(running_sequences[:, :, cur_len - 1], -1) model_inputs = self.prepare_inputs_for_generation( flatten_beam_dim(input_ids), use_cache=use_cache, **model_kwargs ) model_outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) logits = unflatten_beam_dim(model_outputs.logits[:, -1], num_beams) # 2. Compute log probs # get log probabilities from logits, process logits with processors (*e.g.* min_length, ...), and # add new logprobs to existing running logprobs scores. log_probs = tf.nn.log_softmax(logits) log_probs = logits_processor(flatten_beam_dim(running_sequences), flatten_beam_dim(log_probs), cur_len) log_probs = unflatten_beam_dim(log_probs, num_beams) if do_sample: log_probs = logits_warper(flatten_beam_dim(running_sequences), flatten_beam_dim(log_probs), cur_len) log_probs = unflatten_beam_dim(log_probs, num_beams) log_probs_processed = log_probs log_probs = log_probs + tf.expand_dims(running_scores, axis=2) vocab_size = log_probs.shape[2] log_probs = tf.reshape(log_probs, (batch_size, num_beams * vocab_size)) # Store scores, attentions and hidden_states when required if not use_xla and return_dict_in_generate: if output_scores: all_scores.append( logits_warper( flatten_beam_dim(running_sequences), flatten_beam_dim(log_probs_processed), cur_len, ) ) if output_attentions and self.config.is_encoder_decoder: decoder_attentions.append(model_outputs.decoder_attentions) elif output_attentions and not self.config.is_encoder_decoder: decoder_attentions.append(model_outputs.attentions) if self.config.is_encoder_decoder: cross_attentions.append(model_outputs.cross_attentions) if output_hidden_states and self.config.is_encoder_decoder: decoder_hidden_states.append(model_outputs.decoder_hidden_states) elif output_hidden_states and self.config.is_encoder_decoder: decoder_hidden_states.append(model_outputs.hidden_states) # 3. Retrieve top-K # Each item in batch has num_beams * vocab_size candidate sequences. For each item, get the top 2*k # candidates with the highest log-probabilities. We gather the top 2*K beams here so that even if the # best K sequences reach EOS simultaneously, we have another K sequences remaining to continue the live # beam search. # Gather the top 2*K scores from _all_ beams. # Gather 2*k top beams. # Recover the beam index by floor division. 
# Recover token id by modulo division and expand Id array for broadcasting. # Update sequences for the 2*K top-k new sequences. beams_to_keep = 2 * num_beams if do_sample: topk_indices = sample_without_replacement(log_probs, beams_to_keep) topk_log_probs = tf.gather(log_probs, topk_indices, axis=1, batch_dims=1) else: topk_log_probs, topk_indices = tf.math.top_k(log_probs, k=beams_to_keep) topk_current_beam_indices = topk_indices // vocab_size topk_running_beam_indices = self._gather_beams(running_beam_indices, topk_current_beam_indices) topk_running_sequences = self._gather_beams(running_sequences, topk_current_beam_indices) topk_ids = topk_indices % vocab_size # writes the new token indices_batch = tf.repeat(tf.range(batch_size), [beams_to_keep]) indices_beam = tf.tile(tf.range(beams_to_keep), [batch_size]) update_indices = tf.stack( [indices_batch, indices_beam, tf.broadcast_to(cur_len, [batch_size * beams_to_keep])], axis=-1 ) topk_sequences = tf.tensor_scatter_nd_update( tensor=topk_running_sequences, indices=update_indices, updates=tf.reshape(topk_ids, [batch_size * beams_to_keep]), ) # we want to store the beam indices with batch information -> real beam index = beam index % num beams batch_modified_indices = topk_current_beam_indices + tf.broadcast_to( tf.expand_dims(tf.range(batch_size) * num_beams, axis=1), topk_current_beam_indices.shape ) update_indices = tf.stack( [ indices_batch, indices_beam, tf.broadcast_to(cur_len - decoder_prompt_len, [batch_size * beams_to_keep]), ], axis=-1, ) topk_beam_indices = tf.tensor_scatter_nd_update( tensor=topk_running_beam_indices, indices=update_indices, updates=tf.reshape(batch_modified_indices, [batch_size * beams_to_keep]), ) # 4. Check which sequences have ended # Update current sequences: Did the top `num_beams` sequences reach an end marker? # To prevent these just finished sequences from being added to the current sequences # set of active beam search sequences, set their log probs to a very large negative value. if eos_token_id is None: eos_in_next_token = tf.zeros(topk_sequences[:, :, cur_len].shape, dtype=tf.bool) else: eos_in_next_token = tf.math.reduce_any( tf.equal( tf.broadcast_to( topk_sequences[:, :, cur_len], [len(eos_token_id)] + topk_sequences[:, :, cur_len].shape, ), tf.expand_dims(tf.expand_dims(eos_token_id, -1), -1), ), axis=0, ) did_topk_just_finished = eos_in_next_token & tf.broadcast_to( tf.concat((tf.ones((num_beams), dtype=tf.bool), tf.zeros((num_beams), dtype=tf.bool)), axis=0), shape_list(eos_in_next_token), ) # non-top `num_beams` eos tokens can't be used to finish a beam, but the others can't be used in the next # running sentences either running_topk_log_probs = topk_log_probs + tf.cast(eos_in_next_token, tf.float32) * -1.0e9 # 5. Get running sequences scores for next # Determine the top k beam indices (from top 2*k beams) from log probs and gather top k beams # (from top 2*k beams). next_topk_indices = tf.math.top_k(running_topk_log_probs, k=num_beams)[1] next_running_sequences, next_running_scores, next_running_beam_indices = self._gather_beams( [topk_sequences, running_topk_log_probs, topk_beam_indices], next_topk_indices ) # 6. 
Process topk logits # Further process log probs: # - add length penalty # - make sure no scores can be added anymore if beam is full # - make sure still running sequences cannot be chosen as finalized beam topk_log_probs = topk_log_probs / ( tf.cast(cur_len + 1 - decoder_prompt_len, dtype=tf.float32) ** length_penalty ) beams_in_batch_are_full = tf.broadcast_to( tf.math.reduce_all(is_sent_finished, axis=-1, keepdims=True), shape_list(did_topk_just_finished) ) & (early_stopping is True) add_penalty = ~did_topk_just_finished | beams_in_batch_are_full topk_log_probs += tf.cast(add_penalty, tf.float32) * -1.0e9 # 7. Get scores, sequences, is sentence finished for next. # Combine sequences, scores, and flags along the beam dimension and compare new finished sequence scores # to existing finished scores and select the best from the new set of beams merged_sequences = tf.concat([sequences, topk_sequences], axis=1) merged_scores = tf.concat([scores, topk_log_probs], axis=1) merged_beams = tf.concat([beam_indices, topk_beam_indices], axis=1) merged_is_sent_finished = tf.concat([is_sent_finished, did_topk_just_finished], axis=1) topk_merged_indices = tf.math.top_k(merged_scores, k=num_beams)[1] next_sequences, next_scores, next_beam_indices, next_is_sent_finished = self._gather_beams( [merged_sequences, merged_scores, merged_beams, merged_is_sent_finished], topk_merged_indices ) # 8. Prepare data for the next iteration # Determine the top k beam indices from the original set of all beams. With these, gather the top k # beam-associated caches. cur_len = cur_len + 1 if "past_key_values" in model_outputs: cache = tf.nest.map_structure( lambda tensor: unflatten_beam_dim(tensor, num_beams, batch_axis=cache_batch_axis), model_outputs.past_key_values, ) next_running_indices = self._gather_beams(topk_current_beam_indices, next_topk_indices) next_cache = self._gather_beams(cache, next_running_indices, batch_axis=cache_batch_axis) model_outputs["past_key_values"] = tf.nest.map_structure( lambda tensor: flatten_beam_dim(tensor, batch_axis=cache_batch_axis), next_cache ) if use_xla: next_model_kwargs = self._update_model_kwargs_for_xla_generation( model_outputs=model_outputs, model_kwargs=model_kwargs, cur_len=cur_len, max_length=max_length, batch_size=(batch_size * num_beams), is_encoder_decoder=self.config.is_encoder_decoder, batch_axis=cache_batch_axis, ) else: next_model_kwargs = self._update_model_kwargs_for_generation( model_outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) # if we don't cache past_key_values key values we need the whole input if model_kwargs.get("past_key_values", None) is None: # let's throw out `past_key_values` since we don't want `None` tensors model_kwargs.pop("past_key_values", None) return ( cur_len, next_running_sequences, next_running_scores, next_running_beam_indices, next_sequences, next_scores, next_beam_indices, next_is_sent_finished, decoder_prompt_len, next_model_kwargs, ) # 5. 
run generation # 1st generation step has to be run before to initialize `past_key_values` (if active) ( cur_len, running_sequences, running_scores, running_beam_indices, sequences, scores, beam_indices, is_sent_finished, decoder_prompt_len, model_kwargs, ) = beam_search_body_fn( cur_len, running_sequences, running_scores, running_beam_indices, sequences, scores, beam_indices, is_sent_finished, decoder_prompt_len, model_kwargs, ) # 2-to-n generation steps can then be run in autoregressive fashion (only in case 1st generation step does # NOT yield EOS token though) maximum_iterations = max_length - cur_len ( cur_len, running_sequences, running_scores, running_beam_indices, sequences, scores, beam_indices, is_sent_finished, decoder_prompt_len, _, ) = tf.while_loop( beam_search_cond_fn, beam_search_body_fn, ( cur_len, running_sequences, running_scores, running_beam_indices, sequences, scores, beam_indices, is_sent_finished, decoder_prompt_len, model_kwargs, ), maximum_iterations=maximum_iterations, ) # 6. prepare outputs # Account for the edge-case where there are no finished sequences for a particular batch item. If so, return # running sequences for that batch item. none_finished = tf.math.reduce_any(is_sent_finished, axis=1) sequences = tf.where(none_finished[:, None, None], sequences, running_sequences) beam_indices = tf.where(none_finished[:, None, None], beam_indices, running_beam_indices) # Apply the length penalty so that running scores match the finalized scores if they are used running_scores = running_scores / (tf.cast(cur_len - decoder_prompt_len, dtype=tf.float32) ** length_penalty) scores = tf.where(none_finished[:, None], scores, running_scores) # Take best beams for each batch (the score is sorted in descending order) sequences = flatten_beam_dim(sequences[:, :num_return_sequences, :]) scores = flatten_beam_dim(scores[:, :num_return_sequences]) beam_indices = flatten_beam_dim(beam_indices[:, :num_return_sequences, :]) if not use_xla: # Cut for backward compatibility sequences = sequences[:, :cur_len] beam_indices = beam_indices[:, : cur_len - decoder_prompt_len] if return_dict_in_generate: if self.config.is_encoder_decoder: # if model is an encoder-decoder, retrieve encoder attention weights and hidden states encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) output_cls = TFBeamSampleEncoderDecoderOutput if do_sample else TFBeamSearchEncoderDecoderOutput return output_cls( sequences=sequences, sequences_scores=scores, scores=all_scores, beam_indices=beam_indices, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: output_cls = TFBeamSampleDecoderOnlyOutput if do_sample else TFBeamSearchDecoderOnlyOutput return output_cls( sequences=sequences, sequences_scores=scores, scores=all_scores, beam_indices=beam_indices, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return sequences def contrastive_search( self, input_ids: tf.Tensor, top_k: Optional[int] = 1, penalty_alpha: Optional[float] = 0, logits_processor: Optional[TFLogitsProcessorList] = None, logits_warper: Optional[TFLogitsProcessorList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, output_attentions: 
Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, **model_kwargs, ) -> Union[TFContrastiveSearchOutput, tf.Tensor]: r""" Generates sequences of token ids for models with a language modeling head using **contrastive search** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. Parameters: input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. top_k (`int`, *optional*, defaults to 1): The size of the candidate set that is used to re-rank for contrastive search penalty_alpha (`float`, *optional*, defaults to 0): The degeneration penalty for contrastive search; activate when it is larger than 0 logits_processor (`TFLogitsProcessorList`, *optional*): An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. logits_warper (`TFLogitsProcessorList`, *optional*): An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsWarper`] used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step. max_length (`int`, *optional*, defaults to 20): The maximum length of the sequence to be generated. pad_token_id (`int`, *optional*): The id of the *padding* token. eos_token_id (`Union[int, List[int]]`, *optional*): The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more details. output_hidden_states (`bool`, *optional*, defaults to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more details. output_scores (`bool`, *optional*, defaults to `False`): Whether or not to return the prediction scores. See `scores` under returned tensors for more details. return_dict_in_generate (`bool`, *optional*, defaults to `False`): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. model_kwargs: Additional model specific keyword arguments will be forwarded to the `call` function of the model. If model is an encoder-decoder model the kwargs should include `encoder_outputs`. Return: [`~generation.TFContrastiveSearchDecoderOnlyOutput`], [`~generation.TFContrastiveSearchEncoderDecoderOutput`] or `tf.Tensor`: A `tf.Tensor` containing the generated tokens (default behaviour) or a [`~generation.TFContrastiveSearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a [`~generation.TFContrastiveSearchEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. 
Examples: ```python >>> from transformers import AutoTokenizer, TFAutoModelForCausalLM >>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m") >>> model = TFAutoModelForCausalLM.from_pretrained("facebook/opt-125m") >>> # set pad_token_id to eos_token_id because OPT does not have a PAD token >>> model.config.pad_token_id = model.config.eos_token_id >>> input_prompt = "DeepMind Company is" >>> input_ids = tokenizer(input_prompt, return_tensors="tf") >>> outputs = model.contrastive_search(**input_ids, penalty_alpha=0.6, top_k=4, max_length=64) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['DeepMind Company is a company that focuses on the development and commercialization of artificial intelligence (AI). DeepMind’s mission is to help people understand and solve problems that are difficult to solve in the world today.\n\nIn this post, we talk about the benefits of deep learning in business and how it'] ```""" def gather_best_candidate(nested, selected_idx_stacked, batch_axis=0): """Gathers the slices indexed by selected_idx_stacked from a potentially nested structure of tensors.""" def gather_fn(tensor): gathered_tensor = tf.gather(params=tensor, indices=selected_idx_stacked, axis=batch_axis) return gathered_tensor return tf.nest.map_structure(gather_fn, nested) # 1. init greedy_search values logits_processor = logits_processor if logits_processor is not None else TFLogitsProcessorList() logits_warper = logits_warper if logits_warper is not None else TFLogitsProcessorList() max_length = max_length if max_length is not None else self.generation_config.max_length pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id if isinstance(eos_token_id, int): eos_token_id = [eos_token_id] output_scores = output_scores if output_scores is not None else self.generation_config.output_scores output_attentions = ( output_attentions if output_attentions is not None else self.generation_config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.generation_config.return_dict_in_generate ) use_cache = True # In contrastive search, we always use cache model_kwargs.pop("use_cache", None) use_xla = not tf.executing_eagerly() # TODO (Joao): fix cache format or find programatic way to detect cache index # GPT2 and other models has a slightly different cache structure, with a different batch axis model_name = str(self.decoder) if "EncoderDecoder" in str(self) else str(self) cache_batch_axis = 1 if any(model_prefix in model_name for model_prefix in ("TFGPT2", "TFCTRL")) else 0 # 2. init `attentions`, `hidden_states`, and `scores` tuples scores = [] if (return_dict_in_generate and output_scores) else None decoder_attentions = [] if (return_dict_in_generate and output_attentions) else None cross_attentions = [] if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = [] if (return_dict_in_generate and output_hidden_states) else None # 3. 
init tensors to use for "xla-compileable" generate function batch_size, cur_len = shape_list(input_ids) # initialize `generated` (`input_ids` padded with `pad_token_id`), `finished_sequences` input_ids_padding = tf.ones((batch_size, max_length - cur_len), dtype=tf.int32) * (pad_token_id or 0) generated = tf.concat([input_ids, input_ids_padding], axis=-1) finished_sequences = tf.zeros((batch_size,), dtype=tf.bool) # 4. define "xla-compile-able" stop-condition and auto-regressive function # define condition fn def contrastive_search_cond_fn( generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables ): """state termination condition fn.""" return ~tf.reduce_all(finished_sequences) # define condition fn def contrastive_search_body_fn( generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables ): """state update fn.""" # if the first step in the loop, encode all the prefix and obtain: (1) past_key_values; # (2) last_hidden_states; (3) logit_for_next_step; (4) update model kwargs for the next step if model_kwargs.get("past_key_values") is None: # prepare inputs model_inputs = self.prepare_inputs_for_generation( generated[:, :cur_len], use_cache=use_cache, **model_kwargs ) # encode the given prefix and prepare model inputs; encoder-decoder model process the prefix and save # the `encoder_outputs` outputs = self( **model_inputs, return_dict=True, output_hidden_states=True, output_attentions=output_attentions ) # last decoder hidden states will be used to compute the degeneration penalty (cosine similarity with # previous tokens) if self.config.is_encoder_decoder: last_hidden_states = outputs.decoder_hidden_states[-1] else: last_hidden_states = outputs.hidden_states[-1] # XLA: last_hidden_states normally grows at each step, but in XLA it is padded so as to be used across # iterations (with fixed shapes) if use_xla: last_hidden_states = tf.pad(last_hidden_states, [[0, 0], [0, max_length - cur_len], [0, 0]]) # next logit for contrastive search to select top-k candidate tokens logit_for_next_step = outputs.logits[:, -1, :] if use_xla: model_kwargs = self._update_model_kwargs_for_xla_generation( model_outputs=outputs, model_kwargs=model_kwargs, cur_len=cur_len, max_length=max_length, batch_size=batch_size, is_encoder_decoder=self.config.is_encoder_decoder, batch_axis=cache_batch_axis, ) else: model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) # Expands model inputs top_k times, for batched forward passes (akin to beam search). _, model_kwargs = self._expand_inputs_for_generation( expand_size=top_k, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs ) past_key_values = model_kwargs.get("past_key_values") if past_key_values is None: raise ValueError( f"{self.__class__.__name__} does not support caching and therefore **can't** be used " "for contrastive search." ) elif ( not isinstance(past_key_values[0], (tuple, tf.Tensor)) or past_key_values[0][0].shape[0] != batch_size ): raise ValueError( f"{self.__class__.__name__} does not have a standard cache format and therefore **can't** be " "used for contrastive search without further modifications." 
) else: logit_for_next_step = next_step_cached_variables["logit_for_next_step"] last_hidden_states = next_step_cached_variables["last_hidden_states"] outputs = next_step_cached_variables["outputs"] # contrastive_search main logic start: # contrastive search decoding consists of two steps: (1) candidate tokens recall; (2) candidate re-rank by # degeneration penalty logit_for_next_step = logits_processor(generated, logit_for_next_step, cur_len) logit_for_next_step = logits_warper(generated, logit_for_next_step, cur_len) next_probs = stable_softmax(logit_for_next_step, axis=-1) top_k_probs, top_k_ids = tf.math.top_k(next_probs, k=top_k) # Store scores, attentions and hidden_states when required if not use_xla and return_dict_in_generate: if output_scores: scores.append(logit_for_next_step) if output_attentions and self.config.is_encoder_decoder: decoder_attentions.append(outputs.decoder_attentions) elif output_attentions and not self.config.is_encoder_decoder: decoder_attentions.append(outputs.attentions) if self.config.is_encoder_decoder: cross_attentions.append(outputs.cross_attentions) if output_hidden_states and self.config.is_encoder_decoder: decoder_hidden_states.append(outputs.decoder_hidden_states) elif output_hidden_states and self.config.is_encoder_decoder: decoder_hidden_states.append(outputs.hidden_states) # Replicates the new past_key_values to match the `top_k` candidates model_kwargs["past_key_values"] = tf.nest.map_structure( lambda tensor: tf.repeat(tensor, top_k, axis=cache_batch_axis), model_kwargs["past_key_values"] ) # compute the candidate tokens by the language model and collects their hidden_states next_model_inputs = self.prepare_inputs_for_generation( tf.reshape(top_k_ids, [-1, 1]), use_cache=use_cache, **model_kwargs ) outputs = self( **next_model_inputs, return_dict=True, output_hidden_states=True, output_attentions=output_attentions ) next_past_key_values = self._extract_past_from_model_output(outputs) logits = outputs.logits[:, -1, :] # name is different for encoder-decoder and decoder-only models if self.config.is_encoder_decoder: next_hidden = outputs.decoder_hidden_states[-1] full_hidden_states = outputs.decoder_hidden_states else: next_hidden = outputs.hidden_states[-1] full_hidden_states = outputs.hidden_states context_hidden = tf.repeat(last_hidden_states[:, :cur_len, :], top_k, axis=0) # compute the degeneration penalty and re-rank the candidates based on the degeneration penalty and the # model confidence selected_idx = _ranking_fast(context_hidden, next_hidden, top_k_probs, penalty_alpha, top_k) # converts indices to a dimension of top_k to the stacked top_k * batch_size dimension, for indexing # without a need to reshape on tensors that have these two dimensions stacked selected_idx_stacked = selected_idx + tf.range(selected_idx.shape[0], dtype=tf.int64) * top_k # prepare for the next step: (1) next token_id; (2) past_key_values; (3) last_hidden_states for computing # the degeneration penalty; (4) logits for selecting next top-k candidates; (5) selected tokens scores # (model confidence minus degeneration penalty); (6) decoder hidden_states next_tokens = tf.gather(top_k_ids, selected_idx, axis=1, batch_dims=1) next_hidden = gather_best_candidate(next_hidden, selected_idx_stacked) # XLA: last_hidden_states normally grows at each step, but in XLA it is padded so as to be used across # iterations (with fixed shapes) if use_xla: last_hidden_states = dynamic_update_slice(last_hidden_states, next_hidden, [0, cur_len, 0]) else: last_hidden_states = 
tf.concat([last_hidden_states, next_hidden], axis=1) next_decoder_hidden_states = gather_best_candidate(full_hidden_states, selected_idx_stacked) next_past_key_values = gather_best_candidate( next_past_key_values, selected_idx_stacked, batch_axis=cache_batch_axis ) logit_for_next_step = gather_best_candidate(logits, selected_idx_stacked) # Rebuilds the relevant parts of the model output for the selected token, for use in the next iteration if self.config.is_encoder_decoder: next_step_cross_attentions = () next_step_decoder_attentions = () if output_attentions: next_step_cross_attentions = gather_best_candidate(outputs.cross_attentions, selected_idx_stacked) next_step_decoder_attentions = gather_best_candidate( outputs.decoder_attentions, selected_idx_stacked ) outputs = TFSeq2SeqLMOutput( past_key_values=next_past_key_values, decoder_hidden_states=next_decoder_hidden_states, decoder_attentions=next_step_decoder_attentions or None, cross_attentions=next_step_cross_attentions or None, ) else: next_step_attentions = () if output_attentions: next_step_attentions = gather_best_candidate(outputs.attentions, selected_idx_stacked) outputs = TFCausalLMOutputWithPast( past_key_values=next_past_key_values, hidden_states=next_decoder_hidden_states, attentions=next_step_attentions or None, ) # contrastive_search main logic end if eos_token_id is not None: if pad_token_id is None: raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.") unfinished_seq = 1 - tf.cast(finished_sequences, tf.int32) next_tokens = next_tokens * unfinished_seq + pad_token_id * (1 - unfinished_seq) next_token_is_eos = tf.math.reduce_any( tf.equal( tf.broadcast_to(next_tokens, (len(eos_token_id), batch_size)), tf.expand_dims(eos_token_id, -1) ), axis=0, ) finished_sequences = finished_sequences | next_token_is_eos # update `generated` and `cur_len` update_indices = tf.stack([tf.range(batch_size), tf.broadcast_to(cur_len, [batch_size])], axis=-1) generated = tf.tensor_scatter_nd_update(tensor=generated, indices=update_indices, updates=next_tokens) cur_len += 1 if use_xla: # NOTE: 1) relative to other generation strategies, contrastive search is always running forward # passes one step ahead -- hence the `cur_len=cur_len + 1`; 2) the attention mask here is expanded from # [batch_size, ...] to [batch_size*top_k, ...] -- hence the `batch_size=batch_size * top_k` model_kwargs = self._update_model_kwargs_for_xla_generation( model_outputs=outputs, model_kwargs=model_kwargs, cur_len=cur_len + 1, max_length=max_length, batch_size=batch_size * top_k, is_encoder_decoder=self.config.is_encoder_decoder, batch_axis=cache_batch_axis, ) else: model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) next_step_cached_variables = { "logit_for_next_step": logit_for_next_step, "last_hidden_states": last_hidden_states, "outputs": outputs, } return generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables # 5. 
run generation # 1st generation step has to be run before to initialize `past_key_values` generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables = contrastive_search_body_fn( generated, finished_sequences, cur_len, model_kwargs, None ) # 2-to-n generation steps can then be run in autoregressive fashion # only in case 1st generation step does NOT yield EOS token though maximum_iterations = max_length - cur_len generated, _, cur_len, _, _ = tf.while_loop( contrastive_search_cond_fn, contrastive_search_body_fn, (generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables), maximum_iterations=maximum_iterations, ) # 6. prepare outputs if not use_xla: # cut for backward compatibility generated = generated[:, :cur_len] if return_dict_in_generate: if self.config.is_encoder_decoder: # if model is an encoder-decoder, retrieve encoder attention weights # and hidden states encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) scores = tuple(scores) if scores is not None else None decoder_attentions = tuple(decoder_attentions) if decoder_attentions is not None else None cross_attentions = tuple(cross_attentions) if cross_attentions is not None else None decoder_hidden_states = tuple(decoder_hidden_states) if decoder_hidden_states is not None else None return TFContrastiveSearchEncoderDecoderOutput( sequences=generated, scores=scores, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, cross_attentions=cross_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return TFContrastiveSearchDecoderOnlyOutput( sequences=generated, scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return generated def scatter_values_on_batch_indices(values, batch_indices): shape = shape_list(batch_indices) # broadcast batch dim to shape broad_casted_batch_dims = tf.reshape(tf.broadcast_to(tf.expand_dims(tf.range(shape[0]), axis=-1), shape), [1, -1]) # transform batch_indices to pair_indices pair_indices = tf.transpose(tf.concat([broad_casted_batch_dims, tf.reshape(batch_indices, [1, -1])], 0)) # scatter values to pair indices return tf.scatter_nd(pair_indices, tf.reshape(values, [-1]), shape) def sample_without_replacement(logits, num_samples): """ categorical sampling without replacement is currently not implemented the gumbel-max trick will do for now see https://github.com/tensorflow/tensorflow/issues/9260 for more info """ z = -tf.math.log(-tf.math.log(tf.random.uniform(shape_list(logits), 0, 1))) _, indices = tf.nn.top_k(logits + z, num_samples) return indices def _ranking_fast( context_hidden: tf.Tensor, next_hidden: tf.Tensor, next_top_k_probs: tf.Tensor, alpha: float, beam_width: int, ) -> tf.Tensor: """ Reranks the top_k candidates based on a degeneration penalty (cosine similarity with previous tokens), as described in the paper "A Contrastive Framework for Neural Text Generation". Returns the index of the best candidate for each row in the batch. 
""" norm_context_hidden = context_hidden / tf.norm(context_hidden, axis=2, keepdims=True) norm_next_hidden = next_hidden / tf.norm(next_hidden, axis=2, keepdims=True) cosine_matrix = tf.squeeze(tf.linalg.matmul(norm_context_hidden, norm_next_hidden, transpose_b=True), axis=-1) degeneration_penalty = tf.reduce_max(cosine_matrix, axis=-1) next_top_k_probs = tf.reshape(next_top_k_probs, shape=[-1]) contrastive_score = (1.0 - alpha) * next_top_k_probs - alpha * degeneration_penalty contrastive_score = tf.reshape(contrastive_score, shape=[-1, beam_width]) selected_idx = tf.argmax(contrastive_score, axis=1) return selected_idx
transformers/src/transformers/generation/tf_utils.py/0
{ "file_path": "transformers/src/transformers/generation/tf_utils.py", "repo_id": "transformers", "token_count": 76509 }
324
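The `sample_without_replacement` helper in the file above draws several distinct candidate tokens from the beam logits through the Gumbel-max trick. A minimal, self-contained sketch of that trick (illustrative shapes only, not part of the file above):

```python
# Standalone illustration of the Gumbel-max trick: adding Gumbel(0, 1) noise to the logits
# and taking the top-k yields k *distinct* categorical samples per row, without an explicit
# "remove and renormalize" loop.
import tensorflow as tf

batch_size, vocab_size, num_samples = 2, 10, 3
logits = tf.random.normal((batch_size, vocab_size))

# Gumbel(0, 1) noise: z = -log(-log(U)) with U ~ Uniform(0, 1)
uniform = tf.random.uniform(tf.shape(logits), minval=1e-9, maxval=1.0)
gumbel_noise = -tf.math.log(-tf.math.log(uniform))

# top-k over the perturbed logits returns k distinct indices per batch row
_, sampled_indices = tf.math.top_k(logits + gumbel_noise, k=num_samples)
print(sampled_indices.shape)  # (2, 3)
```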
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import warnings from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union from ..utils import ( check_peft_version, find_adapter_config_file, is_accelerate_available, is_peft_available, is_torch_available, logging, ) if is_accelerate_available(): from accelerate import dispatch_model from accelerate.utils import get_balanced_memory, infer_auto_device_map # Minimum PEFT version supported for the integration MIN_PEFT_VERSION = "0.5.0" if TYPE_CHECKING: if is_torch_available(): import torch logger = logging.get_logger(__name__) class PeftAdapterMixin: """ A class containing all functions for loading and using adapters weights that are supported in PEFT library. For more details about adapters and injecting them on a transformer-based model, check out the documentation of PEFT library: https://huggingface.co/docs/peft/index Currently supported PEFT methods are all non-prefix tuning methods. Below is the list of supported PEFT methods that anyone can load, train and run with this mixin class: - Low Rank Adapters (LoRA): https://huggingface.co/docs/peft/conceptual_guides/lora - IA3: https://huggingface.co/docs/peft/conceptual_guides/ia3 - AdaLora: https://arxiv.org/abs/2303.10512 Other PEFT models such as prompt tuning, prompt learning are out of scope as these adapters are not "injectable" into a torch module. For using these methods, please refer to the usage guide of PEFT library. With this mixin, if the correct PEFT version is installed, it is possible to: - Load an adapter stored on a local path or in a remote Hub repository, and inject it in the model - Attach new adapters in the model and train them with Trainer or by your own. - Attach multiple adapters and iteratively activate / deactivate them - Activate / deactivate all adapters from the model. - Get the `state_dict` of the active adapter. """ _hf_peft_config_loaded = False def load_adapter( self, peft_model_id: Optional[str] = None, adapter_name: Optional[str] = None, revision: Optional[str] = None, token: Optional[str] = None, device_map: Optional[str] = "auto", max_memory: Optional[str] = None, offload_folder: Optional[str] = None, offload_index: Optional[int] = None, peft_config: Dict[str, Any] = None, adapter_state_dict: Optional[Dict[str, "torch.Tensor"]] = None, adapter_kwargs: Optional[Dict[str, Any]] = None, ) -> None: """ Load adapter weights from file or remote Hub folder. If you are not familiar with adapters and PEFT methods, we invite you to read more about them on PEFT official documentation: https://huggingface.co/docs/peft Requires peft as a backend to load the adapter weights. Args: peft_model_id (`str`, *optional*): The identifier of the model to look for on the Hub, or a local path to the saved adapter config file and adapter weights. adapter_name (`str`, *optional*): The adapter name to use. If not set, will use the default adapter. 
revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. <Tip> To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`. </Tip> token (`str`, `optional`): Whether to use authentication token to load the remote folder. Useful to load private repositories that are on HuggingFace Hub. You might need to call `huggingface-cli login` and paste your tokens to cache it. device_map (`str` or `Dict[str, Union[int, str, torch.device]]` or `int` or `torch.device`, *optional*): A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the same device. If we only pass the device (*e.g.*, `"cpu"`, `"cuda:1"`, `"mps"`, or a GPU ordinal rank like `1`) on which the model will be allocated, the device map will map the entire model to this device. Passing `device_map = 0` means put the whole model on GPU 0. To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For more information about each option see [designing a device map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). max_memory (`Dict`, *optional*): A dictionary device identifier to maximum memory. Will default to the maximum memory available for each GPU and the available CPU RAM if unset. offload_folder (`str` or `os.PathLike`, `optional`): If the `device_map` contains any value `"disk"`, the folder where we will offload weights. offload_index (`int`, `optional`): `offload_index` argument to be passed to `accelerate.dispatch_model` method. peft_config (`Dict[str, Any]`, *optional*): The configuration of the adapter to add, supported adapters are non-prefix tuning and adaption prompts methods. This argument is used in case users directly pass PEFT state dicts adapter_state_dict (`Dict[str, torch.Tensor]`, *optional*): The state dict of the adapter to load. This argument is used in case users directly pass PEFT state dicts adapter_kwargs (`Dict[str, Any]`, *optional*): Additional keyword arguments passed along to the `from_pretrained` method of the adapter config and `find_adapter_config_file` method. """ check_peft_version(min_version=MIN_PEFT_VERSION) adapter_name = adapter_name if adapter_name is not None else "default" if adapter_kwargs is None: adapter_kwargs = {} from peft import PeftConfig, inject_adapter_in_model, load_peft_weights from peft.utils import set_peft_model_state_dict if self._hf_peft_config_loaded and adapter_name in self.peft_config: raise ValueError(f"Adapter with name {adapter_name} already exists. Please use a different name.") if peft_model_id is None and (adapter_state_dict is None and peft_config is None): raise ValueError( "You should either pass a `peft_model_id` or a `peft_config` and `adapter_state_dict` to load an adapter." ) # We keep `revision` in the signature for backward compatibility if revision is not None and "revision" not in adapter_kwargs: adapter_kwargs["revision"] = revision elif revision is not None and "revision" in adapter_kwargs and revision != adapter_kwargs["revision"]: logger.error( "You passed a `revision` argument both in `adapter_kwargs` and as a standalone argument. " "The one in `adapter_kwargs` will be used." 
) # Override token with adapter_kwargs' token if "token" in adapter_kwargs: token = adapter_kwargs.pop("token") if peft_config is None: adapter_config_file = find_adapter_config_file( peft_model_id, token=token, **adapter_kwargs, ) if adapter_config_file is None: raise ValueError( f"adapter model file not found in {peft_model_id}. Make sure you are passing the correct path to the " "adapter model." ) peft_config = PeftConfig.from_pretrained( peft_model_id, token=token, **adapter_kwargs, ) # Create and add fresh new adapters into the model. inject_adapter_in_model(peft_config, self, adapter_name) if not self._hf_peft_config_loaded: self._hf_peft_config_loaded = True if peft_model_id is not None: adapter_state_dict = load_peft_weights(peft_model_id, token=token, **adapter_kwargs) # We need to pre-process the state dict to remove unneeded prefixes - for backward compatibility processed_adapter_state_dict = {} prefix = "base_model.model." for key, value in adapter_state_dict.items(): if key.startswith(prefix): new_key = key[len(prefix) :] else: new_key = key processed_adapter_state_dict[new_key] = value # Load state dict incompatible_keys = set_peft_model_state_dict(self, processed_adapter_state_dict, adapter_name) if incompatible_keys is not None: # check only for unexpected keys if hasattr(incompatible_keys, "unexpected_keys") and len(incompatible_keys.unexpected_keys) > 0: logger.warning( f"Loading adapter weights from {peft_model_id} led to unexpected keys not found in the model: " f" {incompatible_keys.unexpected_keys}. " ) # Re-dispatch model and hooks in case the model is offloaded to CPU / Disk. if ( (getattr(self, "hf_device_map", None) is not None) and (len(set(self.hf_device_map.values()).intersection({"cpu", "disk"})) > 0) and len(self.peft_config) == 1 ): self._dispatch_accelerate_model( device_map=device_map, max_memory=max_memory, offload_folder=offload_folder, offload_index=offload_index, ) def add_adapter(self, adapter_config, adapter_name: Optional[str] = None) -> None: r""" If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT official documentation: https://huggingface.co/docs/peft Adds a fresh new adapter to the current model for training purpose. If no adapter name is passed, a default name is assigned to the adapter to follow the convention of PEFT library (in PEFT we use "default" as the default adapter name). Args: adapter_config (`~peft.PeftConfig`): The configuration of the adapter to add, supported adapters are non-prefix tuning and adaption prompts methods adapter_name (`str`, *optional*, defaults to `"default"`): The name of the adapter to add. If no name is passed, a default name is assigned to the adapter. """ check_peft_version(min_version=MIN_PEFT_VERSION) from peft import PeftConfig, inject_adapter_in_model adapter_name = adapter_name or "default" if not self._hf_peft_config_loaded: self._hf_peft_config_loaded = True elif adapter_name in self.peft_config: raise ValueError(f"Adapter with name {adapter_name} already exists. Please use a different name.") if not isinstance(adapter_config, PeftConfig): raise ValueError( f"adapter_config should be an instance of PeftConfig. Got {type(adapter_config)} instead." 
) # Retrieve the name or path of the model, one could also use self.config._name_or_path # but to be consistent with what we do in PEFT: https://github.com/huggingface/peft/blob/6e783780ca9df3a623992cc4d1d665001232eae0/src/peft/mapping.py#L100 adapter_config.base_model_name_or_path = self.__dict__.get("name_or_path", None) inject_adapter_in_model(adapter_config, self, adapter_name) self.set_adapter(adapter_name) def set_adapter(self, adapter_name: Union[List[str], str]) -> None: """ If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT official documentation: https://huggingface.co/docs/peft Sets a specific adapter by forcing the model to use that adapter and disable the other adapters. Args: adapter_name (`Union[List[str], str]`): The name of the adapter to set. Can be also a list of strings to set multiple adapters. """ check_peft_version(min_version=MIN_PEFT_VERSION) if not self._hf_peft_config_loaded: raise ValueError("No adapter loaded. Please load an adapter first.") elif isinstance(adapter_name, list): missing = set(adapter_name) - set(self.peft_config) if len(missing) > 0: raise ValueError( f"Following adapter(s) could not be found: {', '.join(missing)}. Make sure you are passing the correct adapter name(s)." f" current loaded adapters are: {list(self.peft_config.keys())}" ) elif adapter_name not in self.peft_config: raise ValueError( f"Adapter with name {adapter_name} not found. Please pass the correct adapter name among {list(self.peft_config.keys())}" ) from peft.tuners.tuners_utils import BaseTunerLayer from peft.utils import ModulesToSaveWrapper _adapters_has_been_set = False for _, module in self.named_modules(): if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)): # For backward compatibility with previous PEFT versions if hasattr(module, "set_adapter"): module.set_adapter(adapter_name) else: module.active_adapter = adapter_name _adapters_has_been_set = True if not _adapters_has_been_set: raise ValueError( "Did not succeed in setting the adapter. Please make sure you are using a model that supports adapters." ) def disable_adapters(self) -> None: r""" If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT official documentation: https://huggingface.co/docs/peft Disable all adapters that are attached to the model. This leads to inferring with the base model only. """ check_peft_version(min_version=MIN_PEFT_VERSION) if not self._hf_peft_config_loaded: raise ValueError("No adapter loaded. Please load an adapter first.") from peft.tuners.tuners_utils import BaseTunerLayer from peft.utils import ModulesToSaveWrapper for _, module in self.named_modules(): if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)): # The recent version of PEFT need to call `enable_adapters` instead if hasattr(module, "enable_adapters"): module.enable_adapters(enabled=False) else: module.disable_adapters = True def enable_adapters(self) -> None: """ If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT official documentation: https://huggingface.co/docs/peft Enable adapters that are attached to the model. The model will use `self.active_adapter()` """ check_peft_version(min_version=MIN_PEFT_VERSION) if not self._hf_peft_config_loaded: raise ValueError("No adapter loaded. 
Please load an adapter first.") from peft.tuners.tuners_utils import BaseTunerLayer for _, module in self.named_modules(): if isinstance(module, BaseTunerLayer): # The recent version of PEFT need to call `enable_adapters` instead if hasattr(module, "enable_adapters"): module.enable_adapters(enabled=True) else: module.disable_adapters = False def active_adapters(self) -> List[str]: """ If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT official documentation: https://huggingface.co/docs/peft Gets the current active adapters of the model. In case of multi-adapter inference (combining multiple adapters for inference) returns the list of all active adapters so that users can deal with them accordingly. For previous PEFT versions (that does not support multi-adapter inference), `module.active_adapter` will return a single string. """ check_peft_version(min_version=MIN_PEFT_VERSION) if not is_peft_available(): raise ImportError("PEFT is not available. Please install PEFT to use this function: `pip install peft`.") if not self._hf_peft_config_loaded: raise ValueError("No adapter loaded. Please load an adapter first.") from peft.tuners.tuners_utils import BaseTunerLayer for _, module in self.named_modules(): if isinstance(module, BaseTunerLayer): active_adapters = module.active_adapter break # For previous PEFT versions if isinstance(active_adapters, str): active_adapters = [active_adapters] return active_adapters def active_adapter(self) -> str: warnings.warn( "The `active_adapter` method is deprecated and will be removed in a future version.", FutureWarning ) return self.active_adapters()[0] def get_adapter_state_dict(self, adapter_name: Optional[str] = None) -> dict: """ If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT official documentation: https://huggingface.co/docs/peft Gets the adapter state dict that should only contain the weights tensors of the specified adapter_name adapter. If no adapter_name is passed, the active adapter is used. Args: adapter_name (`str`, *optional*): The name of the adapter to get the state dict from. If no name is passed, the active adapter is used. """ check_peft_version(min_version=MIN_PEFT_VERSION) if not self._hf_peft_config_loaded: raise ValueError("No adapter loaded. Please load an adapter first.") from peft import get_peft_model_state_dict if adapter_name is None: adapter_name = self.active_adapter() adapter_state_dict = get_peft_model_state_dict(self, adapter_name=adapter_name) return adapter_state_dict def _dispatch_accelerate_model( self, device_map: str, max_memory: Optional[int] = None, offload_folder: Optional[str] = None, offload_index: Optional[int] = None, ) -> None: """ Optional re-dispatch the model and attach new hooks to the model in case the model has been loaded with accelerate (i.e. with `device_map=xxx`) Args: device_map (`str` or `Dict[str, Union[int, str, torch.device]]` or `int` or `torch.device`, *optional*): A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the same device. If we only pass the device (*e.g.*, `"cpu"`, `"cuda:1"`, `"mps"`, or a GPU ordinal rank like `1`) on which the model will be allocated, the device map will map the entire model to this device. Passing `device_map = 0` means put the whole model on GPU 0. 
To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For more information about each option see [designing a device map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). max_memory (`Dict`, *optional*): A dictionary device identifier to maximum memory. Will default to the maximum memory available for each GPU and the available CPU RAM if unset. offload_folder (`str` or `os.PathLike`, *optional*): If the `device_map` contains any value `"disk"`, the folder where we will offload weights. offload_index (`int`, *optional*): The offload_index argument to be passed to `accelerate.dispatch_model` method. """ dispatch_model_kwargs = {} # Safety checker for previous `accelerate` versions # `offload_index` was introduced in https://github.com/huggingface/accelerate/pull/873/ if "offload_index" in inspect.signature(dispatch_model).parameters: dispatch_model_kwargs["offload_index"] = offload_index no_split_module_classes = self._no_split_modules if device_map != "sequential": max_memory = get_balanced_memory( self, max_memory=max_memory, no_split_module_classes=no_split_module_classes, low_zero=(device_map == "balanced_low_0"), ) if isinstance(device_map, str): device_map = infer_auto_device_map( self, max_memory=max_memory, no_split_module_classes=no_split_module_classes ) dispatch_model( self, device_map=device_map, offload_dir=offload_folder, **dispatch_model_kwargs, )
transformers/src/transformers/integrations/peft.py/0
{ "file_path": "transformers/src/transformers/integrations/peft.py", "repo_id": "transformers", "token_count": 9185 }
325
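A hedged usage sketch for the `PeftAdapterMixin` above; `"base-model-id"` and `"user/adapter-repo"` are placeholder identifiers, and the LoRA `target_modules` are assumptions that would need to match the module names of the actual base model:

```python
# Illustrative only: placeholder checkpoint and adapter repository names.
from peft import LoraConfig
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("base-model-id")  # placeholder checkpoint

# Load a trained adapter from the Hub (or a local path) under an explicit name
model.load_adapter("user/adapter-repo", adapter_name="sst2_lora")  # placeholder repo

# Attach a fresh, untrained LoRA adapter next to it
fresh_config = LoraConfig(r=8, lora_alpha=16, target_modules=["q_proj", "v_proj"])
model.add_adapter(fresh_config, adapter_name="fresh_lora")

# Switch between adapters, or disable them to run the bare base model
model.set_adapter("sst2_lora")
print(model.active_adapters())  # ["sst2_lora"]
model.disable_adapters()
model.enable_adapters()

# Extract only the weights that belong to a given adapter
adapter_weights = model.get_adapter_state_dict("sst2_lora")
```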
/*! ************************************************************************************************** * Deformable DETR * Copyright (c) 2020 SenseTime. All Rights Reserved. * Licensed under the Apache License, Version 2.0 [see LICENSE for details] ************************************************************************************************** * Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 ************************************************************************************************** */ #pragma once #include <torch/extension.h> at::Tensor ms_deform_attn_cuda_forward( const at::Tensor &value, const at::Tensor &spatial_shapes, const at::Tensor &level_start_index, const at::Tensor &sampling_loc, const at::Tensor &attn_weight, const int im2col_step); std::vector<at::Tensor> ms_deform_attn_cuda_backward( const at::Tensor &value, const at::Tensor &spatial_shapes, const at::Tensor &level_start_index, const at::Tensor &sampling_loc, const at::Tensor &attn_weight, const at::Tensor &grad_output, const int im2col_step);
transformers/src/transformers/kernels/deta/cuda/ms_deform_attn_cuda.h/0
{ "file_path": "transformers/src/transformers/kernels/deta/cuda/ms_deform_attn_cuda.h", "repo_id": "transformers", "token_count": 353 }
326
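For context, here is a rough, unoptimized PyTorch sketch of the computation that the declared `ms_deform_attn_cuda_forward` fuses. The shapes follow the usual multi-scale deformable attention convention; the `im2col_step` batching and the `level_start_index` bookkeeping of the real kernel are omitted, so this is a reference for semantics only, not the kernel itself:

```python
# Reference semantics of multi-scale deformable attention (assumed shapes, CPU-friendly).
import torch
import torch.nn.functional as F

def ms_deform_attn_reference(value, spatial_shapes, sampling_locations, attention_weights):
    # value:               (batch, num_keys, num_heads, head_dim)
    # spatial_shapes:      (num_levels, 2) holding (height, width) per feature level
    # sampling_locations:  (batch, num_queries, num_heads, num_levels, num_points, 2) in [0, 1]
    # attention_weights:   (batch, num_queries, num_heads, num_levels, num_points)
    batch, _, num_heads, head_dim = value.shape
    _, num_queries, _, num_levels, num_points, _ = sampling_locations.shape
    value_list = value.split([int(h) * int(w) for h, w in spatial_shapes], dim=1)
    sampling_grids = 2 * sampling_locations - 1  # grid_sample expects coordinates in [-1, 1]
    sampled = []
    for level, (height, width) in enumerate(spatial_shapes):
        # (batch * num_heads, head_dim, height, width)
        value_l = value_list[level].flatten(2).transpose(1, 2).reshape(
            batch * num_heads, head_dim, int(height), int(width)
        )
        # (batch * num_heads, num_queries, num_points, 2)
        grid_l = sampling_grids[:, :, :, level].transpose(1, 2).flatten(0, 1)
        # bilinear sampling of each point from this feature level
        sampled.append(
            F.grid_sample(value_l, grid_l, mode="bilinear", padding_mode="zeros", align_corners=False)
        )
    # (batch * num_heads, 1, num_queries, num_levels * num_points)
    weights = attention_weights.transpose(1, 2).reshape(
        batch * num_heads, 1, num_queries, num_levels * num_points
    )
    # weighted sum over all sampled points and levels
    output = (torch.stack(sampled, dim=-2).flatten(-2) * weights).sum(-1)
    return output.view(batch, num_heads * head_dim, num_queries).transpose(1, 2)
```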
#include <torch/extension.h> #include <ATen/ATen.h> #include <vector> std::vector<at::Tensor> fast_hash_ver1_kernel( at::Tensor query_mask, at::Tensor query_vector, at::Tensor key_mask, at::Tensor key_vector, int num_hash_f, int hash_code_len, bool use_cuda ); at::Tensor lsh_cumulation_ver1_kernel( at::Tensor query_mask, at::Tensor query_hash_code, at::Tensor key_mask, at::Tensor key_hash_code, at::Tensor value, int hashtable_capacity, bool use_cuda ); at::Tensor lsh_weighted_cumulation_ver1_kernel( at::Tensor query_mask, at::Tensor query_hash_code, at::Tensor query_weight, at::Tensor key_mask, at::Tensor key_hash_code, at::Tensor key_weight, at::Tensor value, int hashtable_capacity, bool use_cuda ); at::Tensor lsh_weighted_cumulation_ver2_kernel( at::Tensor query_mask, at::Tensor query_hash_code, at::Tensor query_weight, at::Tensor key_mask, at::Tensor key_hash_code, at::Tensor key_weight, at::Tensor value, int hashtable_capacity, bool use_cuda ); at::Tensor lsh_weighted_cumulation_ver3_kernel( at::Tensor query_mask, at::Tensor query_hash_code, at::Tensor query_weight, at::Tensor key_mask, at::Tensor key_hash_code, at::Tensor key_weight, at::Tensor value, int hashtable_capacity, bool use_cuda ); at::Tensor lsh_weighted_cumulation_ver4_kernel( at::Tensor query_mask, at::Tensor query_hash_code, at::Tensor query_weight, at::Tensor key_mask, at::Tensor key_hash_code, at::Tensor key_weight, at::Tensor value, int hashtable_capacity, bool use_cuda );
transformers/src/transformers/kernels/yoso/fast_lsh_cumulation.h/0
{ "file_path": "transformers/src/transformers/kernels/yoso/fast_lsh_cumulation.h", "repo_id": "transformers", "token_count": 639 }
327
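For orientation, a hedged sketch of the random-projection hashing scheme that the declared `fast_hash_ver1_kernel` family is built around: each of `num_hash_f` hash functions projects a vector onto `hash_code_len` random directions and packs the signs into an integer code, so similar vectors tend to land in the same bucket. The exact bit packing and memory layout of the kernels may differ:

```python
# Illustrative sign-random-projection hashing; not the kernel's exact layout.
import torch

def lsh_hash_codes(vectors, num_hash_f, hash_code_len):
    # vectors: (batch, seq_len, dim) -> codes: (batch, seq_len, num_hash_f) integer hash codes,
    # each in [0, 2 ** hash_code_len), which would correspond to `hashtable_capacity` buckets.
    batch, seq_len, dim = vectors.shape
    projections = torch.randn(num_hash_f, dim, hash_code_len)
    bits = (torch.einsum("bsd,fdc->bsfc", vectors, projections) > 0).long()
    powers = 2 ** torch.arange(hash_code_len)
    return (bits * powers).sum(-1)

codes = lsh_hash_codes(torch.randn(2, 16, 64), num_hash_f=4, hash_code_len=8)
print(codes.shape)  # torch.Size([2, 16, 4])
```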
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ALBERT model configuration"""

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig


ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert/albert-base-v1": "https://huggingface.co/albert/albert-base-v1/resolve/main/config.json",
    "albert/albert-large-v1": "https://huggingface.co/albert/albert-large-v1/resolve/main/config.json",
    "albert/albert-xlarge-v1": "https://huggingface.co/albert/albert-xlarge-v1/resolve/main/config.json",
    "albert/albert-xxlarge-v1": "https://huggingface.co/albert/albert-xxlarge-v1/resolve/main/config.json",
    "albert/albert-base-v2": "https://huggingface.co/albert/albert-base-v2/resolve/main/config.json",
    "albert/albert-large-v2": "https://huggingface.co/albert/albert-large-v2/resolve/main/config.json",
    "albert/albert-xlarge-v2": "https://huggingface.co/albert/albert-xlarge-v2/resolve/main/config.json",
    "albert/albert-xxlarge-v2": "https://huggingface.co/albert/albert-xxlarge-v2/resolve/main/config.json",
}


class AlbertConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`AlbertModel`] or a [`TFAlbertModel`]. It is used
    to instantiate an ALBERT model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the ALBERT
    [albert/albert-xxlarge-v2](https://huggingface.co/albert/albert-xxlarge-v2) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 30000):
            Vocabulary size of the ALBERT model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`AlbertModel`] or [`TFAlbertModel`].
        embedding_size (`int`, *optional*, defaults to 128):
            Dimensionality of vocabulary embeddings.
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_hidden_groups (`int`, *optional*, defaults to 1):
            Number of groups for the hidden layers; parameters in the same group are shared.
        num_attention_heads (`int`, *optional*, defaults to 64):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 16384):
            The dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        inner_group_num (`int`, *optional*, defaults to 1):
            The number of inner repetitions of attention and ffn.
        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu_new"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids` passed when calling [`AlbertModel`] or [`TFAlbertModel`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        classifier_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for attached classifiers.
        position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
            Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
            positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
            [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
            For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
            with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 2):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 3):
            End of stream token id.

    Examples:

    ```python
    >>> from transformers import AlbertConfig, AlbertModel

    >>> # Initializing an ALBERT-xxlarge style configuration
    >>> albert_xxlarge_configuration = AlbertConfig()

    >>> # Initializing an ALBERT-base style configuration
    >>> albert_base_configuration = AlbertConfig(
    ...     hidden_size=768,
    ...     num_attention_heads=12,
    ...     intermediate_size=3072,
    ... )

    >>> # Initializing a model (with random weights) from the ALBERT-base style configuration
    >>> model = AlbertModel(albert_base_configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


# Copied from transformers.models.bert.configuration_bert.BertOnnxConfig with Roberta->Albert
class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
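The `inputs` property above is what supplies the dynamic-axes declaration used during ONNX export. A minimal sketch of how it can be inspected, assuming the `OnnxConfig` base class accepts `(config, task=...)` as its constructor arguments (the actual export entry point is not shown in this file):

```python
>>> from transformers import AlbertConfig
>>> from transformers.models.albert.configuration_albert import AlbertOnnxConfig

>>> albert_config = AlbertConfig()
>>> onnx_config = AlbertOnnxConfig(albert_config, task="default")

>>> # Each model input maps to {axis index: symbolic axis name}.
>>> dict(onnx_config.inputs)
{'input_ids': {0: 'batch', 1: 'sequence'}, 'attention_mask': {0: 'batch', 1: 'sequence'}, 'token_type_ids': {0: 'batch', 1: 'sequence'}}
```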
transformers/src/transformers/models/albert/configuration_albert.py/0
{ "file_path": "transformers/src/transformers/models/albert/configuration_albert.py", "repo_id": "transformers", "token_count": 3466 }
328
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Autoformer model configuration"""

from typing import List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}


class AutoformerConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of an [`AutoformerModel`]. It is used to instantiate
    an Autoformer model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Autoformer
    [huggingface/autoformer-tourism-monthly](https://huggingface.co/huggingface/autoformer-tourism-monthly)
    architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        prediction_length (`int`):
            The prediction length for the decoder. In other words, the prediction horizon of the model.
        context_length (`int`, *optional*, defaults to `prediction_length`):
            The context length for the encoder. If unset, the context length will be the same as the
            `prediction_length`.
        distribution_output (`string`, *optional*, defaults to `"student_t"`):
            The distribution emission head for the model. Could be either "student_t", "normal" or
            "negative_binomial".
        loss (`string`, *optional*, defaults to `"nll"`):
            The loss function for the model corresponding to the `distribution_output` head. For parametric
            distributions it is the negative log likelihood (nll) - which currently is the only supported one.
        input_size (`int`, *optional*, defaults to 1):
            The size of the target variable which by default is 1 for univariate targets. Would be > 1 in case of
            multivariate targets.
        lags_sequence (`list[int]`, *optional*, defaults to `[1, 2, 3, 4, 5, 6, 7]`):
            The lags of the input time series as covariates, often dictated by the frequency. Default is `[1, 2, 3, 4,
            5, 6, 7]`.
        scaling (`bool`, *optional*, defaults to `True`):
            Whether to scale the input targets.
        num_time_features (`int`, *optional*, defaults to 0):
            The number of time features in the input time series.
        num_dynamic_real_features (`int`, *optional*, defaults to 0):
            The number of dynamic real valued features.
        num_static_categorical_features (`int`, *optional*, defaults to 0):
            The number of static categorical features.
        num_static_real_features (`int`, *optional*, defaults to 0):
            The number of static real valued features.
        cardinality (`list[int]`, *optional*):
            The cardinality (number of different values) for each of the static categorical features. Should be a list
            of integers, having the same length as `num_static_categorical_features`. Cannot be `None` if
            `num_static_categorical_features` is > 0.
        embedding_dimension (`list[int]`, *optional*):
            The dimension of the embedding for each of the static categorical features. Should be a list of integers,
            having the same length as `num_static_categorical_features`. Cannot be `None` if
            `num_static_categorical_features` is > 0.
        d_model (`int`, *optional*, defaults to 64):
            Dimensionality of the transformer layers.
        encoder_layers (`int`, *optional*, defaults to 2):
            Number of encoder layers.
        decoder_layers (`int`, *optional*, defaults to 2):
            Number of decoder layers.
        encoder_attention_heads (`int`, *optional*, defaults to 2):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_attention_heads (`int`, *optional*, defaults to 2):
            Number of attention heads for each attention layer in the Transformer decoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 32):
            Dimension of the "intermediate" (often named feed-forward) layer in the encoder.
        decoder_ffn_dim (`int`, *optional*, defaults to 32):
            Dimension of the "intermediate" (often named feed-forward) layer in the decoder.
        activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and decoder. If string, `"gelu"`
            and `"relu"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the encoder and decoder.
        encoder_layerdrop (`float`, *optional*, defaults to 0.1):
            The dropout probability for the attention and fully connected layers for each encoder layer.
        decoder_layerdrop (`float`, *optional*, defaults to 0.1):
            The dropout probability for the attention and fully connected layers for each decoder layer.
        attention_dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability used between the two layers of the feed-forward networks.
        num_parallel_samples (`int`, *optional*, defaults to 100):
            The number of samples to generate in parallel for each time step of inference.
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated normal weight initialization distribution.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether to use the past key/values attentions (if applicable to the model) to speed up decoding.
        label_length (`int`, *optional*, defaults to 10):
            Start token length of the Autoformer decoder, which is used for direct multi-step prediction (i.e.
            non-autoregressive generation).
        moving_average (`int`, *optional*, defaults to 25):
            The window size of the moving average. In practice, it's the kernel size in AvgPool1d of the Decomposition
            Layer.
        autocorrelation_factor (`int`, *optional*, defaults to 3):
            "Attention" (i.e. AutoCorrelation mechanism) factor which is used to find the top k autocorrelation
            delays. It's recommended in the paper to set it to a number between 1 and 5.

    Example:

    ```python
    >>> from transformers import AutoformerConfig, AutoformerModel

    >>> # Initializing a default Autoformer configuration
    >>> configuration = AutoformerConfig()

    >>> # Randomly initializing a model (with random weights) from the configuration
    >>> model = AutoformerModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        # Autoformer arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
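To make the `cardinality` / `embedding_dimension` handling above concrete, here is a small illustrative sketch; the feature counts and values are made up for the example:

```python
>>> from transformers import AutoformerConfig

>>> # Two static categorical features, with 10 and 4 distinct values respectively.
>>> config = AutoformerConfig(
...     prediction_length=24,
...     num_static_categorical_features=2,
...     cardinality=[10, 4],
... )

>>> # embedding_dimension was left unset, so it falls back to min(50, (cardinality + 1) // 2) per feature.
>>> config.embedding_dimension
[5, 2]

>>> # context_length defaults to prediction_length when not given.
>>> config.context_length
24
```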
transformers/src/transformers/models/autoformer/configuration_autoformer.py/0
{ "file_path": "transformers/src/transformers/models/autoformer/configuration_autoformer.py", "repo_id": "transformers", "token_count": 4684 }
329
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Huggingface Pytorch checkpoint to Tensorflow checkpoint."""

import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """
    Args:
        model: BertModel Pytorch model instance to be converted
        ckpt_dir: Tensorflow model directory
        model_name: model name

    Currently supported HF models:

        - Y BertModel
        - N BertForMaskedLM
        - N BertForPreTraining
        - N BertForMultipleChoice
        - N BertForNextSentencePrediction
        - N BertForSequenceClassification
        - N BertForQuestionAnswering
    """
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf_var.assign(tf.cast(torch_tensor, tf_var.dtype))
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. google-bert/bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
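Because `main()` accepts an optional `raw_args` list, the converter can be driven from another Python script as well as from the command line. A minimal sketch follows; the checkpoint and output paths are placeholders, and a TF1-compatible `tensorflow` installation is assumed since the script relies on `tf.Session` and `tf.get_variable`:

```python
from convert_bert_pytorch_checkpoint_to_original_tf import main

# Placeholder paths: point these at a real fine-tuned checkpoint and an output directory.
main(
    raw_args=[
        "--model_name", "google-bert/bert-base-uncased",
        "--pytorch_model_path", "/path/to/pytorch_model.bin",
        "--tf_cache_dir", "/path/to/tf_checkpoints",
    ]
)
```

The equivalent command-line invocation passes the same flags directly to the script.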
transformers/src/transformers/models/bert/convert_bert_pytorch_checkpoint_to_original_tf.py/0
{ "file_path": "transformers/src/transformers/models/bert/convert_bert_pytorch_checkpoint_to_original_tf.py", "repo_id": "transformers", "token_count": 1660 }
330
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_big_bird": ["BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP", "BigBirdConfig", "BigBirdOnnxConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_big_bird"] = ["BigBirdTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_big_bird_fast"] = ["BigBirdTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_big_bird"] = [
        "BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdForCausalLM",
        "BigBirdForMaskedLM",
        "BigBirdForMultipleChoice",
        "BigBirdForPreTraining",
        "BigBirdForQuestionAnswering",
        "BigBirdForSequenceClassification",
        "BigBirdForTokenClassification",
        "BigBirdLayer",
        "BigBirdModel",
        "BigBirdPreTrainedModel",
        "load_tf_weights_in_big_bird",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_big_bird"] = [
        "FlaxBigBirdForCausalLM",
        "FlaxBigBirdForMaskedLM",
        "FlaxBigBirdForMultipleChoice",
        "FlaxBigBirdForPreTraining",
        "FlaxBigBirdForQuestionAnswering",
        "FlaxBigBirdForSequenceClassification",
        "FlaxBigBirdForTokenClassification",
        "FlaxBigBirdModel",
        "FlaxBigBirdPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_big_bird import BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP, BigBirdConfig, BigBirdOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_big_bird import BigBirdTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_big_bird_fast import BigBirdTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_big_bird import (
            BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdForCausalLM,
            BigBirdForMaskedLM,
            BigBirdForMultipleChoice,
            BigBirdForPreTraining,
            BigBirdForQuestionAnswering,
            BigBirdForSequenceClassification,
            BigBirdForTokenClassification,
            BigBirdLayer,
            BigBirdModel,
            BigBirdPreTrainedModel,
            load_tf_weights_in_big_bird,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_big_bird import (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
            FlaxBigBirdModel,
            FlaxBigBirdPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
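For context, the `_LazyModule` registration above means the heavy submodules are only imported when one of their attributes is first requested. A rough sketch of the effect from the caller's side; the import-timing behaviour is the point being illustrated, not a specific API:

```python
# Importing the config class only triggers loading of configuration_big_bird.
from transformers import BigBirdConfig

config = BigBirdConfig()

# The PyTorch modeling file is only imported at this point, and only succeeds
# if torch (an optional dependency for this submodule) is installed.
from transformers import BigBirdForMaskedLM  # noqa: F401
```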
transformers/src/transformers/models/big_bird/__init__.py/0
{ "file_path": "transformers/src/transformers/models/big_bird/__init__.py", "repo_id": "transformers", "token_count": 1883 }
331
# coding=utf-8 # Copyright 2021 The Facebook, Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Flax BlenderbotSmall model.""" import math import random from functools import partial from typing import Callable, Optional, Tuple import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen import combine_masks, make_causal_mask from flax.linen.attention import dot_product_attention_weights from flax.traverse_util import flatten_dict, unflatten_dict from jax import lax from jax.random import PRNGKey from ...modeling_flax_outputs import ( FlaxBaseModelOutput, FlaxBaseModelOutputWithPastAndCrossAttentions, FlaxCausalLMOutputWithCrossAttentions, FlaxSeq2SeqLMOutput, FlaxSeq2SeqModelOutput, ) from ...modeling_flax_utils import ( ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring, append_replace_return_docstrings, overwrite_call_docstring, ) from ...utils import add_start_docstrings, logging, replace_return_docstrings from .configuration_blenderbot_small import BlenderbotSmallConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "facebook/blenderbot_small-90M" _CONFIG_FOR_DOC = "BlenderbotSmallConfig" BLENDERBOT_SMALL_START_DOCSTRING = r""" This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a Flax Linen [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior. Finally, this model supports inherent JAX features such as: - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) Parameters: config ([`BlenderbotSmallConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights. dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and `jax.numpy.bfloat16` (on TPUs). This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given `dtype`. 
**Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.** If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and [`~FlaxPreTrainedModel.to_bf16`]. """ BLENDERBOT_SMALL_INPUTS_DOCSTRING = r""" Args: input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) For translation and summarization training, `decoder_input_ids` should be provided. If no `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right for denoising pre-training following the paper. decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ BLENDERBOT_SMALL_ENCODE_INPUTS_DOCSTRING = r""" Args: input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. 
Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ BLENDERBOT_SMALL_DECODE_INPUTS_DOCSTRING = r""" Args: decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) For translation and summarization training, `decoder_input_ids` should be provided. If no `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right for denoising pre-training following the paper. encoder_outputs (`tuple(tuple(jnp.ndarray)`): Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`): Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. 
return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ # Copied from transformers.models.bart.modeling_flax_bart.shift_tokens_right def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray: """ Shift input ids one token to the right. """ shifted_input_ids = jnp.zeros_like(input_ids) shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1]) shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id) shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids) return shifted_input_ids # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartAttention with Bart->BlenderbotSmall class FlaxBlenderbotSmallAttention(nn.Module): config: BlenderbotSmallConfig embed_dim: int num_heads: int dropout: float = 0.0 causal: bool = False bias: bool = True dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self) -> None: self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {self.num_heads})." ) dense = partial( nn.Dense, self.embed_dim, use_bias=self.bias, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std), ) self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense() self.out_proj = dense() self.dropout_layer = nn.Dropout(rate=self.dropout) if self.causal: self.causal_mask = make_causal_mask( jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool" ) def _split_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim)) def _merge_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,)) @nn.compact def _concatenate_to_cache(self, key, value, query, attention_mask): """ This function takes projected key, value states from a single input token and concatenates the states to cached states from previous steps. This function is slighly adapted from the official Flax repository: https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252 """ # detect if we're initializing by absence of existing cache data. is_initialized = self.has_variable("cache", "cached_key") cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype) cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype) cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32)) if is_initialized: *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape # update key, value caches with our new 1d spatial slices cur_index = cache_index.value indices = (0,) * len(batch_dims) + (cur_index, 0, 0) key = lax.dynamic_update_slice(cached_key.value, key, indices) value = lax.dynamic_update_slice(cached_value.value, value, indices) cached_key.value = key cached_value.value = value num_updated_cache_vectors = query.shape[1] cache_index.value = cache_index.value + num_updated_cache_vectors # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements. 
pad_mask = jnp.broadcast_to( jnp.arange(max_length) < cur_index + num_updated_cache_vectors, tuple(batch_dims) + (1, num_updated_cache_vectors, max_length), ) attention_mask = combine_masks(pad_mask, attention_mask) return key, value, attention_mask def __call__( self, hidden_states: jnp.ndarray, key_value_states: Optional[jnp.ndarray] = None, attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, ) -> Tuple[jnp.ndarray]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None batch_size = hidden_states.shape[0] # get query proj query_states = self.q_proj(hidden_states) # get key, value proj if is_cross_attention: # cross_attentions key_states = self.k_proj(key_value_states) value_states = self.v_proj(key_value_states) else: # self_attention key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = self._split_heads(query_states) key_states = self._split_heads(key_states) value_states = self._split_heads(value_states) # handle cache prepare causal attention mask if self.causal: query_length, key_length = query_states.shape[1], key_states.shape[1] if self.has_variable("cache", "cached_key"): mask_shift = self.variables["cache"]["cache_index"] max_decoder_length = self.variables["cache"]["cached_key"].shape[1] causal_mask = lax.dynamic_slice( self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length) ) else: causal_mask = self.causal_mask[:, :, :query_length, :key_length] causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:]) # combine masks if needed if attention_mask is not None and self.causal: attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape) attention_mask = combine_masks(attention_mask, causal_mask) elif self.causal: attention_mask = causal_mask elif attention_mask is not None: attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) # During fast autoregressive decoding, we feed one position at a time, # and cache the keys and values step by step. if self.causal and (self.has_variable("cache", "cached_key") or init_cache): key_states, value_states, attention_mask = self._concatenate_to_cache( key_states, value_states, query_states, attention_mask ) # Convert the boolean attention mask to an attention bias. 
if attention_mask is not None: # attention mask in the form of attention bias attention_bias = lax.select( attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype), ) else: attention_bias = None dropout_rng = None if not deterministic and self.dropout > 0.0: dropout_rng = self.make_rng("dropout") attn_weights = dot_product_attention_weights( query_states, key_states, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.dropout, broadcast_dropout=True, deterministic=deterministic, dtype=self.dtype, precision=None, ) attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states) attn_output = self._merge_heads(attn_output) attn_output = self.out_proj(attn_output) return attn_output, attn_weights # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartEncoderLayer with Bart->BlenderbotSmall class FlaxBlenderbotSmallEncoderLayer(nn.Module): config: BlenderbotSmallConfig dtype: jnp.dtype = jnp.float32 def setup(self) -> None: self.embed_dim = self.config.d_model self.self_attn = FlaxBlenderbotSmallAttention( config=self.config, embed_dim=self.embed_dim, num_heads=self.config.encoder_attention_heads, dropout=self.config.attention_dropout, dtype=self.dtype, ) self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) self.dropout_layer = nn.Dropout(rate=self.config.dropout) self.activation_fn = ACT2FN[self.config.activation_function] self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout) self.fc1 = nn.Dense( self.config.encoder_ffn_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std), ) self.fc2 = nn.Dense( self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std) ) self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) def __call__( self, hidden_states: jnp.ndarray, attention_mask: jnp.ndarray, output_attentions: bool = True, deterministic: bool = True, ) -> Tuple[jnp.ndarray]: residual = hidden_states hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartEncoderLayerCollection with Bart->BlenderbotSmall class FlaxBlenderbotSmallEncoderLayerCollection(nn.Module): config: BlenderbotSmallConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.layers = [ FlaxBlenderbotSmallEncoderLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.encoder_layers) ] self.layerdrop = self.config.encoder_layerdrop def __call__( self, hidden_states, attention_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): all_attentions = () if output_attentions else None 
all_hidden_states = () if output_hidden_states else None for encoder_layer in self.layers: if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = random.uniform(0, 1) if not deterministic and (dropout_probability < self.layerdrop): # skip the layer layer_outputs = (None, None) else: layer_outputs = encoder_layer( hidden_states, attention_mask, output_attentions, deterministic, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states += (hidden_states,) outputs = (hidden_states, all_hidden_states, all_attentions) if not return_dict: return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions ) # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderLayer with Bart->BlenderbotSmall class FlaxBlenderbotSmallDecoderLayer(nn.Module): config: BlenderbotSmallConfig dtype: jnp.dtype = jnp.float32 def setup(self) -> None: self.embed_dim = self.config.d_model self.self_attn = FlaxBlenderbotSmallAttention( config=self.config, embed_dim=self.embed_dim, num_heads=self.config.decoder_attention_heads, dropout=self.config.attention_dropout, causal=True, dtype=self.dtype, ) self.dropout_layer = nn.Dropout(rate=self.config.dropout) self.activation_fn = ACT2FN[self.config.activation_function] self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout) self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) self.encoder_attn = FlaxBlenderbotSmallAttention( config=self.config, embed_dim=self.embed_dim, num_heads=self.config.decoder_attention_heads, dropout=self.config.attention_dropout, dtype=self.dtype, ) self.encoder_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) self.fc1 = nn.Dense( self.config.decoder_ffn_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std), ) self.fc2 = nn.Dense( self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std) ) self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) def __call__( self, hidden_states: jnp.ndarray, attention_mask: jnp.ndarray, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, output_attentions: bool = True, deterministic: bool = True, ) -> Tuple[jnp.ndarray]: residual = hidden_states # Self Attention hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, init_cache=init_cache ) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Cross-Attention Block cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states, cross_attn_weights = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, ) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # Fully Connected residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = 
self.activation_dropout_layer(hidden_states, deterministic=deterministic) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) return outputs # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderLayerCollection with Bart->BlenderbotSmall class FlaxBlenderbotSmallDecoderLayerCollection(nn.Module): config: BlenderbotSmallConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.layers = [ FlaxBlenderbotSmallDecoderLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.decoder_layers) ] self.layerdrop = self.config.decoder_layerdrop def __call__( self, hidden_states, attention_mask, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, deterministic: bool = True, init_cache: bool = False, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None for decoder_layer in self.layers: if output_hidden_states: all_hidden_states += (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = random.uniform(0, 1) if not deterministic and (dropout_probability < self.layerdrop): layer_outputs = (None, None, None) else: layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, output_attentions=output_attentions, deterministic=deterministic, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) outputs = [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions] if not return_dict: return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) class FlaxBlenderbotSmallEncoder(nn.Module): config: BlenderbotSmallConfig embed_tokens: nn.Embed dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.dropout_layer = nn.Dropout(rate=self.config.dropout) embed_dim = self.config.d_model self.padding_idx = self.config.pad_token_id self.max_source_positions = self.config.max_position_embeddings self.embed_scale = math.sqrt(embed_dim) if self.config.scale_embedding else 1.0 self.embed_positions = nn.Embed( self.config.max_position_embeddings, embed_dim, embedding_init=jax.nn.initializers.normal(self.config.init_std), ) self.layers = FlaxBlenderbotSmallEncoderLayerCollection(self.config, self.dtype) self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) def __call__( self, input_ids, attention_mask, position_ids, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, deterministic: bool = True, ): 
input_shape = input_ids.shape input_ids = input_ids.reshape(-1, input_shape[-1]) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(position_ids) hidden_states = inputs_embeds + embed_pos hidden_states = self.layernorm_embedding(hidden_states) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) outputs = self.layers( hidden_states, attention_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return outputs return FlaxBaseModelOutput( last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class FlaxBlenderbotSmallDecoder(nn.Module): config: BlenderbotSmallConfig embed_tokens: nn.Embed dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.dropout_layer = nn.Dropout(rate=self.config.dropout) embed_dim = self.config.d_model self.padding_idx = self.config.pad_token_id self.max_target_positions = self.config.max_position_embeddings self.embed_scale = math.sqrt(self.config.d_model) if self.config.scale_embedding else 1.0 self.embed_positions = nn.Embed( self.config.max_position_embeddings, embed_dim, embedding_init=jax.nn.initializers.normal(self.config.init_std), ) self.layers = FlaxBlenderbotSmallDecoderLayerCollection(self.config, self.dtype) self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) def __call__( self, input_ids, attention_mask, position_ids, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, deterministic: bool = True, ): input_shape = input_ids.shape input_ids = input_ids.reshape(-1, input_shape[-1]) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale # embed positions positions = self.embed_positions(position_ids) # BlenderbotSmall applies layer norm on inputs_embeds in decoder inputs_embeds = self.layernorm_embedding(inputs_embeds) hidden_states = inputs_embeds + positions hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) outputs = self.layers( hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return outputs return FlaxBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartModule with Bart->BlenderbotSmall class FlaxBlenderbotSmallModule(nn.Module): config: BlenderbotSmallConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.shared = nn.Embed( self.config.vocab_size, self.config.d_model, embedding_init=jax.nn.initializers.normal(self.config.init_std), dtype=self.dtype, ) self.encoder = FlaxBlenderbotSmallEncoder(self.config, dtype=self.dtype, embed_tokens=self.shared) self.decoder = FlaxBlenderbotSmallDecoder(self.config, dtype=self.dtype, embed_tokens=self.shared) def _get_encoder_module(self): return self.encoder def _get_decoder_module(self): return self.decoder def __call__( self, input_ids, attention_mask, 
decoder_input_ids, decoder_attention_mask, position_ids, decoder_position_ids, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, deterministic: bool = True, ): encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, ) decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, position_ids=decoder_position_ids, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, ) if not return_dict: return decoder_outputs + encoder_outputs return FlaxSeq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) class FlaxBlenderbotSmallPreTrainedModel(FlaxPreTrainedModel): config_class = BlenderbotSmallConfig base_model_prefix: str = "model" module_class: nn.Module = None def __init__( self, config: BlenderbotSmallConfig, input_shape: Tuple[int] = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, **kwargs, ): module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: # init input tensors input_ids = jnp.zeros(input_shape, dtype="i4") # make sure initialization pass will work for FlaxBlenderbotSmallForSequenceClassificationModule input_ids = input_ids.at[(..., -1)].set(self.config.eos_token_id) attention_mask = jnp.ones_like(input_ids) decoder_input_ids = input_ids decoder_attention_mask = jnp.ones_like(input_ids) batch_size, sequence_length = input_ids.shape position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} random_params = self.module.init( rngs, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, position_ids, decoder_position_ids, )["params"] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params def init_cache(self, batch_size, max_length, encoder_outputs): r""" Args: batch_size (`int`): batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache. max_length (`int`): maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized cache. encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray)]`): `encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`). 
`last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. """ # init input variables to retrieve cache decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4") decoder_attention_mask = jnp.ones_like(decoder_input_ids) decoder_position_ids = jnp.broadcast_to( jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape ) def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): decoder_module = module._get_decoder_module() return decoder_module( decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs, ) init_variables = self.module.init( jax.random.PRNGKey(0), decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, encoder_hidden_states=encoder_outputs[0], init_cache=True, method=_decoder_forward, # we only need to call the decoder to init the cache ) return unfreeze(init_variables["cache"]) @add_start_docstrings(BLENDERBOT_SMALL_ENCODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class=BlenderbotSmallConfig) def encode( self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray] = None, position_ids: Optional[jnp.ndarray] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, params: dict = None, dropout_rng: PRNGKey = None, ): r""" Returns: Example: ```python >>> from transformers import AutoTokenizer, FlaxBlenderbotSmallForConditionalGeneration >>> model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M") >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M") >>> text = "My friends are cool but they eat too many carbs." 
>>> inputs = tokenizer(text, max_length=1024, return_tensors="np") >>> encoder_outputs = model.encode(**inputs) ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict if attention_mask is None: attention_mask = jnp.ones_like(input_ids) if position_ids is None: batch_size, sequence_length = input_ids.shape position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs): encode_module = module._get_encoder_module() return encode_module(input_ids, attention_mask, position_ids, **kwargs) return self.module.apply( {"params": params or self.params}, input_ids=jnp.array(input_ids, dtype="i4"), attention_mask=jnp.array(attention_mask, dtype="i4"), position_ids=jnp.array(position_ids, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, method=_encoder_forward, ) @add_start_docstrings(BLENDERBOT_SMALL_DECODE_INPUTS_DOCSTRING) @replace_return_docstrings( output_type=FlaxBaseModelOutputWithPastAndCrossAttentions, config_class=BlenderbotSmallConfig ) def decode( self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray] = None, decoder_attention_mask: Optional[jnp.ndarray] = None, decoder_position_ids: Optional[jnp.ndarray] = None, past_key_values: dict = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, params: dict = None, dropout_rng: PRNGKey = None, ): r""" Returns: Example: ```python >>> import jax.numpy as jnp >>> from transformers import AutoTokenizer, FlaxBlenderbotSmallForConditionalGeneration >>> model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M") >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M") >>> text = "My friends are cool but they eat too many carbs." 
>>> inputs = tokenizer(text, max_length=1024, return_tensors="np") >>> encoder_outputs = model.encode(**inputs) >>> decoder_start_token_id = model.config.decoder_start_token_id >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id >>> outputs = model.decode(decoder_input_ids, encoder_outputs) >>> last_decoder_hidden_states = outputs.last_hidden_state ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict encoder_hidden_states = encoder_outputs[0] if encoder_attention_mask is None: batch_size, sequence_length = encoder_hidden_states.shape[:2] encoder_attention_mask = jnp.ones((batch_size, sequence_length)) batch_size, sequence_length = decoder_input_ids.shape if decoder_attention_mask is None: decoder_attention_mask = jnp.ones((batch_size, sequence_length)) if decoder_position_ids is None: if past_key_values is not None: raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.") decoder_position_ids = jnp.broadcast_to( jnp.arange(sequence_length)[None, :], (batch_size, sequence_length) ) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng inputs = {"params": params or self.params} # if past_key_values are passed then cache is already initialized a private flag init_cache has to be # passed down to ensure cache is used. It has to be made sure that cache is marked as mutable so that # it can be changed by FlaxBlenderbotSmallAttention module if past_key_values: inputs["cache"] = past_key_values mutable = ["cache"] else: mutable = False def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): decoder_module = module._get_decoder_module() return decoder_module( decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs, ) outputs = self.module.apply( inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"), decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward, ) # add updated cache to model output if past_key_values is not None and return_dict: outputs, past = outputs outputs["past_key_values"] = unfreeze(past["cache"]) return outputs elif past_key_values is not None and not return_dict: outputs, past = outputs outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:] return outputs def __call__( self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray] = None, decoder_input_ids: Optional[jnp.ndarray] = None, decoder_attention_mask: Optional[jnp.ndarray] = None, position_ids: Optional[jnp.ndarray] = None, decoder_position_ids: Optional[jnp.ndarray] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, params: dict = None, dropout_rng: PRNGKey = None, ): output_attentions = output_attentions if output_attentions is not None else 
self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict # prepare encoder inputs if attention_mask is None: attention_mask = jnp.ones_like(input_ids) if position_ids is None: batch_size, sequence_length = input_ids.shape position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) # prepare decoder inputs if decoder_input_ids is None: decoder_input_ids = shift_tokens_right( input_ids, self.config.pad_token_id, decoder_start_token_id=self.config.decoder_start_token_id ) if decoder_attention_mask is None: decoder_attention_mask = jnp.ones_like(decoder_input_ids) if decoder_position_ids is None: batch_size, sequence_length = decoder_input_ids.shape decoder_position_ids = jnp.broadcast_to( jnp.arange(sequence_length)[None, :], (batch_size, sequence_length) ) # Handle any PRNG if needed rngs = {"dropout": dropout_rng} if dropout_rng is not None else {} return self.module.apply( {"params": params or self.params}, input_ids=jnp.array(input_ids, dtype="i4"), attention_mask=jnp.array(attention_mask, dtype="i4"), position_ids=jnp.array(position_ids, dtype="i4"), decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"), decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, ) @add_start_docstrings( "The bare BlenderbotSmall Model transformer outputting raw hidden-states without any specific head on top.", BLENDERBOT_SMALL_START_DOCSTRING, ) class FlaxBlenderbotSmallModel(FlaxBlenderbotSmallPreTrainedModel): config: BlenderbotSmallConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation module_class = FlaxBlenderbotSmallModule append_call_sample_docstring(FlaxBlenderbotSmallModel, _CHECKPOINT_FOR_DOC, FlaxSeq2SeqModelOutput, _CONFIG_FOR_DOC) # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartForConditionalGenerationModule with Bart->BlenderbotSmall class FlaxBlenderbotSmallForConditionalGenerationModule(nn.Module): config: BlenderbotSmallConfig dtype: jnp.dtype = jnp.float32 bias_init: Callable[..., jnp.ndarray] = jax.nn.initializers.zeros def setup(self): self.model = FlaxBlenderbotSmallModule(config=self.config, dtype=self.dtype) self.lm_head = nn.Dense( self.model.shared.num_embeddings, use_bias=False, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std), ) self.final_logits_bias = self.param("final_logits_bias", self.bias_init, (1, self.model.shared.num_embeddings)) def _get_encoder_module(self): return self.model.encoder def _get_decoder_module(self): return self.model.decoder def __call__( self, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, position_ids, decoder_position_ids, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, deterministic: bool = True, ): outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, position_ids=position_ids, decoder_position_ids=decoder_position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, ) hidden_states = outputs[0] if 
self.config.tie_word_embeddings: shared_embedding = self.model.variables["params"]["shared"]["embedding"] lm_logits = self.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states) else: lm_logits = self.lm_head(hidden_states) lm_logits += jax.lax.stop_gradient(self.final_logits_bias.astype(self.dtype)) if not return_dict: output = (lm_logits,) + outputs[1:] return output return FlaxSeq2SeqLMOutput( logits=lm_logits, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) @add_start_docstrings( "The BLENDERBOT_SMALL Model with a language modeling head. Can be used for summarization.", BLENDERBOT_SMALL_START_DOCSTRING, ) class FlaxBlenderbotSmallForConditionalGeneration(FlaxBlenderbotSmallPreTrainedModel): module_class = FlaxBlenderbotSmallForConditionalGenerationModule dtype: jnp.dtype = jnp.float32 @add_start_docstrings(BLENDERBOT_SMALL_DECODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class=BlenderbotSmallConfig) def decode( self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray] = None, decoder_attention_mask: Optional[jnp.ndarray] = None, decoder_position_ids: Optional[jnp.ndarray] = None, past_key_values: dict = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, deterministic: bool = True, params: dict = None, dropout_rng: PRNGKey = None, ): r""" Returns: Example: ```python >>> import jax.numpy as jnp >>> from transformers import AutoTokenizer, FlaxBlenderbotSmallForConditionalGeneration >>> model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M") >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M") >>> text = "My friends are cool but they eat too many carbs." 
>>> inputs = tokenizer(text, max_length=1024, return_tensors="np") >>> encoder_outputs = model.encode(**inputs) >>> decoder_start_token_id = model.config.decoder_start_token_id >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id >>> outputs = model.decode(decoder_input_ids, encoder_outputs) >>> logits = outputs.logits ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict encoder_hidden_states = encoder_outputs[0] if encoder_attention_mask is None: batch_size, sequence_length = encoder_hidden_states.shape[:2] encoder_attention_mask = jnp.ones((batch_size, sequence_length)) batch_size, sequence_length = decoder_input_ids.shape if decoder_attention_mask is None: decoder_attention_mask = jnp.ones((batch_size, sequence_length)) if decoder_position_ids is None: if past_key_values is not None: raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.") decoder_position_ids = jnp.broadcast_to( jnp.arange(sequence_length)[None, :], (batch_size, sequence_length) ) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng inputs = {"params": params or self.params} # if past_key_values are passed then cache is already initialized a private flag init_cache has to be # passed down to ensure cache is used. It has to be made sure that cache is marked as mutable so that # it can be changed by FlaxBlenderbotSmallAttention module if past_key_values: inputs["cache"] = past_key_values mutable = ["cache"] else: mutable = False def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): decoder_module = module._get_decoder_module() outputs = decoder_module( decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs, ) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = module.model.variables["params"]["shared"]["embedding"] lm_logits = module.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states) else: lm_logits = module.lm_head(hidden_states) lm_logits += module.final_logits_bias.astype(self.dtype) return lm_logits, outputs outputs = self.module.apply( inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"), decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, rngs=rngs, mutable=mutable, method=_decoder_forward, ) if past_key_values is None: lm_logits, decoder_outputs = outputs else: (lm_logits, decoder_outputs), past = outputs if return_dict: outputs = FlaxCausalLMOutputWithCrossAttentions( logits=lm_logits, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, ) else: outputs = (lm_logits,) + decoder_outputs[1:] # add updated cache to model output if past_key_values is not None and return_dict: outputs["past_key_values"] = unfreeze(past["cache"]) return outputs elif past_key_values is not None and not 
return_dict: outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:] return outputs def prepare_inputs_for_generation( self, decoder_input_ids, max_length, attention_mask: Optional[jax.Array] = None, decoder_attention_mask: Optional[jax.Array] = None, encoder_outputs=None, **kwargs, ): # initializing the cache batch_size, seq_length = decoder_input_ids.shape past_key_values = self.init_cache(batch_size, max_length, encoder_outputs) # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length. # But since the decoder uses a causal mask, those positions are masked anyways. # Thus we can create a single static attention_mask here, which is more efficient for compilation extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4") if decoder_attention_mask is not None: position_ids = decoder_attention_mask.cumsum(axis=-1) - 1 extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, decoder_attention_mask, (0, 0)) else: position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length)) return { "past_key_values": past_key_values, "encoder_outputs": encoder_outputs, "encoder_attention_mask": attention_mask, "decoder_attention_mask": extended_attention_mask, "decoder_position_ids": position_ids, } def update_inputs_for_generation(self, model_outputs, model_kwargs): model_kwargs["past_key_values"] = model_outputs.past_key_values model_kwargs["decoder_position_ids"] = model_kwargs["decoder_position_ids"][:, -1:] + 1 return model_kwargs FLAX_BLENDERBOT_SMALL_CONDITIONAL_GENERATION_DOCSTRING = """ Returns: Summarization example: ```py >>> from transformers import AutoTokenizer, FlaxBlenderbotSmallForConditionalGeneration >>> model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M") >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M") >>> ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs." >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="np") >>> # Generate Summary >>> summary_ids = model.generate(inputs["input_ids"]).sequences >>> print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)) ``` Mask filling example: ```py >>> import jax >>> from transformers import AutoTokenizer, FlaxBlenderbotSmallForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M") >>> TXT = "My friends are <mask> but they eat too many carbs." >>> model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M") >>> input_ids = tokenizer([TXT], return_tensors="np")["input_ids"] >>> logits = model(input_ids).logits >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero()[0].item() >>> probs = jax.nn.softmax(logits[0, masked_index], axis=0) >>> values, predictions = jax.lax.top_k(probs, k=5) >>> tokenizer.decode(predictions).split() ``` """ overwrite_call_docstring( FlaxBlenderbotSmallForConditionalGeneration, BLENDERBOT_SMALL_INPUTS_DOCSTRING + FLAX_BLENDERBOT_SMALL_CONDITIONAL_GENERATION_DOCSTRING, ) append_replace_return_docstrings( FlaxBlenderbotSmallForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC )
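# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the upstream module): a minimal end-to-end
# example of how the classes above are typically driven, assuming the public
# "facebook/blenderbot_small-90M" checkpoint already referenced in the
# docstrings. `generate` (inherited from the Flax generation mixin) calls
# `encode` once, then repeatedly calls `decode` with the cache set up by
# `init_cache` / `prepare_inputs_for_generation` defined above.
if __name__ == "__main__":
    from transformers import AutoTokenizer, FlaxBlenderbotSmallForConditionalGeneration

    tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
    model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")

    inputs = tokenizer(["My friends are cool but they eat too many carbs."], return_tensors="np")
    # Greedy decoding; `.sequences` holds the generated token ids.
    output_ids = model.generate(
        inputs["input_ids"], attention_mask=inputs["attention_mask"], max_length=40
    ).sequences
    print(tokenizer.batch_decode(output_ids, skip_special_tokens=True))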
transformers/src/transformers/models/blenderbot_small/modeling_flax_blenderbot_small.py/0
{ "file_path": "transformers/src/transformers/models/blenderbot_small/modeling_flax_blenderbot_small.py", "repo_id": "transformers", "token_count": 28645 }
332
# coding=utf-8 # Copyright 2023 The Salesforce Authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch BLIP-2 model.""" import math from dataclasses import dataclass from typing import Any, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ..auto import AutoModelForCausalLM, AutoModelForSeq2SeqLM from .configuration_blip_2 import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "Salesforce/blip2-opt-2.7b" BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST = [ "Salesforce/blip2-opt-2.7b", # See all BLIP-2 models at https://huggingface.co/models?filter=blip ] @dataclass class Blip2ForConditionalGenerationModelOutput(ModelOutput): """ Class defining the outputs of [`Blip2ForConditionalGeneration`]. Args: loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): Language modeling loss from the language model. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head of the language model. vision_outputs (`BaseModelOutputWithPooling`): Outputs of the vision encoder. qformer_outputs (`BaseModelOutputWithPoolingAndCrossAttentions`): Outputs of the Q-Former (Querying Transformer). language_model_outputs (`CausalLMOutputWithPast` or `Seq2SeqLMOutput`): Outputs of the language model. 
""" loss: Optional[Tuple[torch.FloatTensor]] = None logits: Optional[Tuple[torch.FloatTensor]] = None vision_outputs: Optional[torch.FloatTensor] = None qformer_outputs: Optional[Tuple[torch.FloatTensor]] = None language_model_outputs: Optional[Tuple[torch.FloatTensor]] = None def to_tuple(self) -> Tuple[Any]: return tuple( self[k] if k not in ["vision_outputs", "qformer_outputs", "language_model_outputs"] else getattr(self, k).to_tuple() for k in self.keys() ) # Copied from transformers.models.blip.modeling_blip.BlipVisionEmbeddings with Blip->Blip2 class Blip2VisionEmbeddings(nn.Module): def __init__(self, config: Blip2VisionConfig): super().__init__() self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.class_embedding = nn.Parameter(torch.randn(1, 1, self.embed_dim)) self.patch_embedding = nn.Conv2d( in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size ) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim)) def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: batch_size = pixel_values.shape[0] target_dtype = self.patch_embedding.weight.dtype patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid] patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) embeddings = embeddings + self.position_embedding[:, : embeddings.size(1), :].to(target_dtype) return embeddings class Blip2Attention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." 
) self.scale = self.head_dim**-0.5 self.dropout = nn.Dropout(config.attention_dropout) # small tweak here compared to CLIP, no bias here self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=False) if config.qkv_bias: q_bias = nn.Parameter(torch.zeros(self.embed_dim)) v_bias = nn.Parameter(torch.zeros(self.embed_dim)) else: q_bias = None v_bias = None if q_bias is not None: qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias)) self.qkv.bias = nn.Parameter(qkv_bias) self.projection = nn.Linear(self.embed_dim, self.embed_dim) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" bsz, tgt_len, embed_dim = hidden_states.size() mixed_qkv = self.qkv(hidden_states) mixed_qkv = mixed_qkv.reshape(bsz, tgt_len, 3, self.num_heads, embed_dim // self.num_heads).permute( 2, 0, 3, 1, 4 ) query_states, key_states, value_states = mixed_qkv[0], mixed_qkv[1], mixed_qkv[2] # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2)) attention_scores = attention_scores * self.scale # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_states).permute(0, 2, 1, 3) new_context_layer_shape = context_layer.size()[:-2] + (self.embed_dim,) context_layer = context_layer.reshape(new_context_layer_shape) output = self.projection(context_layer) outputs = (output, attention_probs) if output_attentions else (output, None) return outputs # Copied from transformers.models.blip.modeling_blip.BlipMLP class Blip2MLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states # Copied from transformers.models.blip.modeling_blip.BlipEncoderLayer with Blip->Blip2 class Blip2EncoderLayer(nn.Module): def __init__(self, config: Blip2Config): super().__init__() self.embed_dim = config.hidden_size self.self_attn = Blip2Attention(config) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = Blip2MLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, output_attentions: Optional[bool] = False, ) -> Tuple[torch.FloatTensor]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, 
src_len)` where padding elements are indicated by very large negative values. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states = self.layer_norm1(hidden_states) hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, head_mask=attention_mask, output_attentions=output_attentions, ) hidden_states = hidden_states + residual residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = hidden_states + residual outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class Blip2PreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = Blip2Config base_model_prefix = "blip" supports_gradient_checkpointing = True _no_split_modules = ["Blip2Attention", "T5Block", "OPTDecoderLayer"] _skip_keys_device_placement = "past_key_values" _keep_in_fp32_modules = ["wo"] def _init_weights(self, module): """Initialize the weights""" factor = self.config.initializer_range if isinstance(module, nn.Conv2d) or isinstance(module, nn.Embedding) or isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=factor) if hasattr(module, "bias") and module.bias is not None: module.bias.data.zero_() if isinstance(module, Blip2VisionEmbeddings): if hasattr(self.config, "vision_config"): factor = self.config.vision_config.initializer_range nn.init.trunc_normal_(module.position_embedding, mean=0.0, std=factor) nn.init.trunc_normal_(module.class_embedding, mean=0.0, std=factor) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() BLIP_2_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior. Parameters: config ([`Blip2Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ BLIP_2_VISION_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`Blip2Processor`]. See [`Blip2Processor.__call__`] for details. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
""" BLIP_2_TEXT_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) T5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [T5 Training](./t5#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ BLIP_2_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`Blip2Processor`]. See [`Blip2Processor.__call__`] for details. input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of input sequence tokens in the vocabulary of the language model. Input tokens can optionally be provided to serve as text prompt, which the language model can continue. Indices can be obtained using [`Blip2Processor`]. See [`Blip2Processor.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary of the language model. Only relevant in case an encoder-decoder language model (like T5) is used. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. 
[What are decoder input IDs?](../glossary#decoder-input-ids) decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. Only relevant in case an encoder-decoder language model (like T5) is used. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ # Copied from transformers.models.blip.modeling_blip.BlipEncoder with Blip->Blip2 class Blip2Encoder(nn.Module): """ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a [`Blip2EncoderLayer`]. Args: config (`Blip2Config`): The corresponding vision configuration for the `Blip2Encoder`. """ def __init__(self, config: Blip2Config): super().__init__() self.config = config self.layers = nn.ModuleList([Blip2EncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, inputs_embeds, attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutput]: r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Embedded representation of the inputs. Should be float, not int tokens. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( encoder_layer.__call__, hidden_states, attention_mask, output_attentions, ) else: layer_outputs = encoder_layer( hidden_states, attention_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) # Copied from transformers.models.blip.modeling_blip.BlipVisionModel with Blip->Blip2, BLIP->BLIP_2 class Blip2VisionModel(Blip2PreTrainedModel): main_input_name = "pixel_values" config_class = Blip2VisionConfig def __init__(self, config: Blip2VisionConfig): super().__init__(config) self.config = config embed_dim = config.hidden_size self.embeddings = Blip2VisionEmbeddings(config) self.encoder = Blip2Encoder(config) self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.post_init() @add_start_docstrings_to_model_forward(BLIP_2_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=Blip2VisionConfig) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") hidden_states = self.embeddings(pixel_values) encoder_outputs = self.encoder( inputs_embeds=hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = encoder_outputs[0] last_hidden_state = self.post_layernorm(last_hidden_state) pooled_output = last_hidden_state[:, 0, :] pooled_output = self.post_layernorm(pooled_output) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) def get_input_embeddings(self): return self.embeddings class Blip2QFormerMultiHeadAttention(nn.Module): def __init__(self, config, is_cross_attention=False): super().__init__() self.config = config if config.hidden_size % config.num_attention_heads != 0 and not 
hasattr(config, "embedding_size"): raise ValueError( "The hidden size (%d) is not a multiple of the number of attention heads (%d)" % (config.hidden_size, config.num_attention_heads) ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) if is_cross_attention: self.key = nn.Linear(config.encoder_hidden_size, self.all_head_size) self.value = nn.Linear(config.encoder_hidden_size, self.all_head_size) else: self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.save_attention = False def save_attn_gradients(self, attn_gradients): self.attn_gradients = attn_gradients def get_attn_gradients(self): return self.attn_gradients def save_attention_map(self, attention_map): self.attention_map = attention_map def get_attention_map(self): return self.attention_map def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) mixed_query_layer = self.query(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. 
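# (Added note) Shape bookkeeping for the matmul below: `transpose_for_scores` returns tensors of shape
# (batch_size, num_attention_heads, seq_len, attention_head_size), so `query_layer @ key_layer^T` yields
# attention_scores of shape (batch_size, num_attention_heads, query_len, key_len). For cross-attention,
# key_len is the number of image tokens coming from `encoder_hidden_states`.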
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": seq_length = hidden_states.size()[1] position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in BertModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.Softmax(dim=-1)(attention_scores) if is_cross_attention and self.save_attention: self.save_attention_map(attention_probs) attention_probs.register_hook(self.save_attn_gradients) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
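# (Added note) Dropout is applied to the normalized probabilities rather than the raw scores, so during
# training the kept attention weights are rescaled by 1/(1 - p) instead of being renormalized to sum to 1;
# in eval mode `nn.Dropout` is a no-op.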
attention_probs_dropped = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs_dropped = attention_probs_dropped * head_mask context_layer = torch.matmul(attention_probs_dropped, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) outputs = outputs + (past_key_value,) return outputs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->Blip2QFormer class Blip2QFormerSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class Blip2QFormerAttention(nn.Module): def __init__(self, config, is_cross_attention=False): super().__init__() self.attention = Blip2QFormerMultiHeadAttention(config, is_cross_attention) self.output = Blip2QFormerSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads ) # Prune linear layers self.attention.query = prune_linear_layer(self.attention.query, index) self.attention.key = prune_linear_layer(self.attention.key, index) self.attention.value = prune_linear_layer(self.attention.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads) self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: self_outputs = self.attention( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Blip2QFormer class Blip2QFormerIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return 
hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->Blip2QFormer class Blip2QFormerOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class Blip2QFormerLayer(nn.Module): def __init__(self, config, layer_idx): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = Blip2QFormerAttention(config) self.layer_idx = layer_idx if layer_idx % config.cross_attention_frequency == 0: self.crossattention = Blip2QFormerAttention(config, is_cross_attention=True) self.has_cross_attention = True else: self.has_cross_attention = False self.intermediate_query = Blip2QFormerIntermediate(config) self.output_query = Blip2QFormerOutput(config) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, query_length=0, ): # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] if query_length > 0: query_attention_output = attention_output[:, :query_length, :] if self.has_cross_attention: if encoder_hidden_states is None: raise ValueError("encoder_hidden_states must be given for cross-attention layers") cross_attention_outputs = self.crossattention( query_attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, output_attentions=output_attentions, ) query_attention_output = cross_attention_outputs[0] # add cross attentions if we output attention weights outputs = outputs + cross_attention_outputs[1:-1] layer_output = apply_chunking_to_forward( self.feed_forward_chunk_query, self.chunk_size_feed_forward, self.seq_len_dim, query_attention_output, ) if attention_output.shape[1] > query_length: layer_output_text = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output[:, query_length:, :], ) layer_output = torch.cat([layer_output, layer_output_text], dim=1) else: layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output, ) outputs = (layer_output,) + outputs outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output def feed_forward_chunk_query(self, attention_output): intermediate_output = self.intermediate_query(attention_output) layer_output = self.output_query(intermediate_output, attention_output) return layer_output class Blip2QFormerEncoder(nn.Module): def __init__(self, config): 
super().__init__() self.config = config self.layer = nn.ModuleList( [Blip2QFormerLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True, query_length=0, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions else None next_decoder_cache = () if use_cache else None for i in range(self.config.num_hidden_layers): layer_module = self.layer[i] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if getattr(self.config, "gradient_checkpointing", False) and self.training: if use_cache: logger.warning( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, query_length, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if layer_module.has_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) class Blip2QFormerModel(Blip2PreTrainedModel): """ Querying Transformer (Q-Former), used in BLIP-2. """ def __init__(self, config: Blip2QFormerConfig): super().__init__(config) self.config = config self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.encoder = Blip2QFormerEncoder(config) self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def get_extended_attention_mask( self, attention_mask: torch.Tensor, input_shape: Tuple[int], device: torch.device, has_query: bool = False, ) -> torch.Tensor: """ Makes broadcastable attention and causal masks so that future and masked tokens are ignored. Arguments: attention_mask (`torch.Tensor`): Mask with ones indicating tokens to attend to, zeros for tokens to ignore. input_shape (`Tuple[int]`): The shape of the input to the model. 
device (`torch.device`): The device of the input to the model. Returns: `torch.Tensor` The extended attention mask, with a the same dtype as `attention_mask.dtype`. """ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. if attention_mask.dim() == 3: extended_attention_mask = attention_mask[:, None, :, :] elif attention_mask.dim() == 2: # Provided a padding mask of dimensions [batch_size, seq_length] # - the model is an encoder, so make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] extended_attention_mask = attention_mask[:, None, None, :] else: raise ValueError( "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format( input_shape, attention_mask.shape ) ) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 return extended_attention_mask def forward( self, query_embeds: torch.FloatTensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, `optional`): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of: shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, `optional`): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # past_key_values_length past_key_values_length = ( past_key_values[0][0].shape[2] - self.config.query_length if past_key_values is not None else 0 ) query_length = query_embeds.shape[1] if query_embeds is not None else 0 embedding_output = self.layernorm(query_embeds) embedding_output = self.dropout(embedding_output) input_shape = embedding_output.size()[:-1] batch_size, seq_length = input_shape device = embedding_output.device if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_hidden_states is not None: if isinstance(encoder_hidden_states, list): encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size() else: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if isinstance(encoder_attention_mask, list): encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask] elif encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, query_length=query_length, ) sequence_output = encoder_outputs[0] pooled_output = sequence_output[:, 0, :] if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) @add_start_docstrings( """ BLIP-2 Model for generating text and image features. The model consists of a vision encoder, Querying Transformer (Q-Former) and a language model. 
""", BLIP_2_START_DOCSTRING, ) class Blip2Model(Blip2PreTrainedModel): config_class = Blip2Config main_input_name = "pixel_values" def __init__(self, config: Blip2Config): super().__init__(config) self.vision_model = Blip2VisionModel(config.vision_config) self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size)) self.qformer = Blip2QFormerModel(config.qformer_config) self.language_projection = nn.Linear(config.qformer_config.hidden_size, config.text_config.hidden_size) if config.use_decoder_only_language_model: language_model = AutoModelForCausalLM.from_config(config.text_config) else: language_model = AutoModelForSeq2SeqLM.from_config(config.text_config) # Update _tied_weights_keys using the base model used. if language_model._tied_weights_keys is not None: self._tied_weights_keys = [f"language_model.{k}" for k in language_model._tied_weights_keys] self.language_model = language_model # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.language_model.get_input_embeddings() def set_input_embeddings(self, value): self.language_model.set_input_embeddings(value) def set_output_embeddings(self, new_embeddings): self.language_model.set_output_embeddings(new_embeddings) def get_output_embeddings(self) -> nn.Module: return self.language_model.get_output_embeddings() def get_encoder(self): return self.language_model.get_encoder() def get_decoder(self): return self.language_model.get_decoder() def _tie_weights(self): if not self.config.use_decoder_only_language_model: self.language_model.encoder.embed_tokens = self.language_model.shared self.language_model.decoder.embed_tokens = self.language_model.shared @add_start_docstrings_to_model_forward(BLIP_2_TEXT_INPUTS_DOCSTRING) def get_text_features( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.Tensor] = None, decoder_attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): r""" Returns: text_outputs (`CausalLMOutputWithPast`, or `tuple(torch.FloatTensor)` if `return_dict=False`): The language model outputs. If `return_dict=True`, the output is a [`CausalLMOutputWithPast`] that contains the language model logits, the past key values and the hidden states if `output_hidden_states=True`. 
Examples: ```python >>> import torch >>> from transformers import AutoTokenizer, Blip2Model >>> model = Blip2Model.from_pretrained("Salesforce/blip2-opt-2.7b") >>> tokenizer = AutoTokenizer.from_pretrained("Salesforce/blip2-opt-2.7b") >>> inputs = tokenizer(["a photo of a cat"], padding=True, return_tensors="pt") >>> text_features = model.get_text_features(**inputs) ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.use_decoder_only_language_model: text_outputs = self.language_model( input_ids=input_ids, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) else: inputs_embeds = self.language_model.get_input_embeddings()(input_ids) text_outputs = self.language_model( inputs_embeds=inputs_embeds, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, labels=labels, ) return text_outputs @add_start_docstrings_to_model_forward(BLIP_2_VISION_INPUTS_DOCSTRING) def get_image_features( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): r""" Returns: vision_outputs (`BaseModelOutputWithPooling` or tuple of `torch.FloatTensor`): The vision model outputs. If `return_dict=True`, the output is a [`BaseModelOutputWithPooling`] that contains the image features, the pooled image features and the hidden states if `output_hidden_states=True`. Examples: ```python >>> import torch >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, Blip2Model >>> model = Blip2Model.from_pretrained("Salesforce/blip2-opt-2.7b") >>> processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> image_outputs = model.get_image_features(**inputs) ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) return vision_outputs @add_start_docstrings_to_model_forward(BLIP_2_INPUTS_DOCSTRING) def get_qformer_features( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): r""" Returns: vision_outputs (`BaseModelOutputWithPooling` or tuple of `torch.FloatTensor`): The vision model outputs. If `return_dict=True`, the output is a [`BaseModelOutputWithPooling`] that contains the image features, the pooled image features and the hidden states if `output_hidden_states=True`. 
Examples: ```python >>> import torch >>> from PIL import Image >>> import requests >>> from transformers import Blip2Processor, Blip2Model >>> processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b") >>> model = Blip2Model.from_pretrained("Salesforce/blip2-opt-2.7b") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> qformer_outputs = model.get_qformer_features(**inputs) ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) image_embeds = vision_outputs[0] # step 2: forward the query tokens through the QFormer, using the image embeddings for cross-attention image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device) query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) query_outputs = self.qformer( query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) return query_outputs @add_start_docstrings_to_model_forward(BLIP_2_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Blip2ForConditionalGenerationModelOutput, config_class=Blip2VisionConfig) def forward( self, pixel_values: torch.FloatTensor, input_ids: torch.FloatTensor, attention_mask: Optional[torch.LongTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.LongTensor] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, Blip2ForConditionalGenerationModelOutput]: r""" Returns: Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import Blip2Processor, Blip2Model >>> import torch >>> device = "cuda" if torch.cuda.is_available() else "cpu" >>> processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b") >>> model = Blip2Model.from_pretrained("Salesforce/blip2-opt-2.7b", torch_dtype=torch.float16) >>> model.to(device) # doctest: +IGNORE_RESULT >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> prompt = "Question: how many cats are there? 
Answer:" >>> inputs = processor(images=image, text=prompt, return_tensors="pt").to(device, torch.float16) >>> outputs = model(**inputs) ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict # step 1: forward the images through the vision encoder, # to get image embeddings of shape (batch_size, seq_len, hidden_size) vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) image_embeds = vision_outputs[0] # step 2: forward the query tokens through the QFormer, using the image embeddings for cross-attention image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device) query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) query_outputs = self.qformer( query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) query_output = query_outputs[0] # step 3: use the language model, conditioned on the query outputs and the prompt language_model_inputs = self.language_projection(query_output) language_model_attention_mask = torch.ones( language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device ) inputs_embeds = self.language_model.get_input_embeddings()(input_ids) inputs_embeds = torch.cat([language_model_inputs, inputs_embeds], dim=1) if attention_mask is None: attention_mask = torch.ones_like(input_ids) expected_device = language_model_attention_mask.device attention_mask = torch.cat([language_model_attention_mask, attention_mask.to(expected_device)], dim=1) if self.config.use_decoder_only_language_model: outputs = self.language_model( inputs_embeds=inputs_embeds, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) logits = outputs.logits if return_dict else outputs[0] loss = None # we compute the loss here since we need to take into account the sequence length of the query embeds if labels is not None: labels = labels.to(logits.device) logits = logits[:, -labels.size(1) :, :] # Shift so that tokens < n predict n shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous().to(logits.device) # Flatten the tokens loss_fct = CrossEntropyLoss(reduction="mean") loss = loss_fct(shift_logits.view(-1, self.config.text_config.vocab_size), shift_labels.view(-1)) else: outputs = self.language_model( inputs_embeds=inputs_embeds, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, labels=labels, ) loss = outputs.loss if return_dict else outputs[0] logits = outputs.logits if return_dict else outputs[1] if not return_dict: output = (logits, vision_outputs, query_outputs, outputs) return ((loss,) + output) if loss is not None else output return Blip2ForConditionalGenerationModelOutput( loss=loss, logits=logits, vision_outputs=vision_outputs, qformer_outputs=query_outputs, language_model_outputs=outputs, ) @add_start_docstrings( """ BLIP-2 Model for generating text given an image and an optional text prompt. The model consists of a vision encoder, Querying Transformer (Q-Former) and a language model. 
One can optionally pass `input_ids` to the model, which serve as a text prompt, to make the language model continue the prompt. Otherwise, the language model starts generating text from the [BOS] (beginning-of-sequence) token. <Tip> Note that Flan-T5 checkpoints cannot be cast to float16. They are pre-trained using bfloat16. </Tip> """, BLIP_2_START_DOCSTRING, ) class Blip2ForConditionalGeneration(Blip2PreTrainedModel): config_class = Blip2Config main_input_name = "pixel_values" def __init__(self, config: Blip2Config): super().__init__(config) self.vision_model = Blip2VisionModel(config.vision_config) self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size)) self.qformer = Blip2QFormerModel(config.qformer_config) self.language_projection = nn.Linear(config.qformer_config.hidden_size, config.text_config.hidden_size) if config.use_decoder_only_language_model: language_model = AutoModelForCausalLM.from_config(config.text_config) else: language_model = AutoModelForSeq2SeqLM.from_config(config.text_config) # Update _tied_weights_keys using the base model used. if language_model._tied_weights_keys is not None: self._tied_weights_keys = [f"language_model.{k}" for k in language_model._tied_weights_keys] self.language_model = language_model # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.language_model.get_input_embeddings() def set_input_embeddings(self, value): self.language_model.set_input_embeddings(value) def set_output_embeddings(self, new_embeddings): self.language_model.set_output_embeddings(new_embeddings) def get_output_embeddings(self) -> nn.Module: return self.language_model.get_output_embeddings() def get_encoder(self): return self.language_model.get_encoder() def get_decoder(self): return self.language_model.get_decoder() def _tie_weights(self): if not self.config.use_decoder_only_language_model: self.language_model.encoder.embed_tokens = self.language_model.shared self.language_model.decoder.embed_tokens = self.language_model.shared def _preprocess_accelerate(self): r""" Some pre-processing hacks to make the model `accelerate` compatible. Check https://github.com/huggingface/transformers/pull/21707 for more details. """ hf_device_map = self.hf_device_map if len(hf_device_map) > 1 and "language_model" not in hf_device_map and torch.cuda.device_count() > 1: # warn users about unexpected behavior when using multi-GPU + BLIP-2 + `accelerate`. logger.warning( "The `language_model` is not in the `hf_device_map` dictionary and you are running your script" " in a multi-GPU environment. this may lead to unexpected behavior when using `accelerate`." " Please pass a `device_map` that contains `language_model` to remove this warning." 
" Please refer to https://github.com/huggingface/blog/blob/main/accelerate-large-models.md for" " more details on creating a `device_map` for large models.", ) if hasattr(self.language_model, "_hf_hook"): self.language_model._hf_hook.io_same_device = True # For `generate` compatibility @add_start_docstrings_to_model_forward(BLIP_2_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Blip2ForConditionalGenerationModelOutput, config_class=Blip2VisionConfig) def forward( self, pixel_values: torch.FloatTensor, input_ids: torch.FloatTensor, attention_mask: Optional[torch.LongTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.LongTensor] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, Blip2ForConditionalGenerationModelOutput]: r""" Returns: Examples: Prepare processor, model and image input ```python >>> from PIL import Image >>> import requests >>> from transformers import Blip2Processor, Blip2ForConditionalGeneration >>> import torch >>> device = "cuda" if torch.cuda.is_available() else "cpu" >>> processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b") >>> model = Blip2ForConditionalGeneration.from_pretrained( ... "Salesforce/blip2-opt-2.7b", load_in_8bit=True, device_map={"": 0}, torch_dtype=torch.float16 ... ) # doctest: +IGNORE_RESULT >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) ``` Image captioning (without providing a text prompt): ```python >>> inputs = processor(images=image, return_tensors="pt").to(device, torch.float16) >>> generated_ids = model.generate(**inputs) >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip() >>> print(generated_text) two cats laying on a couch ``` Visual question answering (prompt = question): ```python >>> prompt = "Question: how many cats are there? Answer:" >>> inputs = processor(images=image, text=prompt, return_tensors="pt").to(device="cuda", dtype=torch.float16) >>> generated_ids = model.generate(**inputs) >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip() >>> print(generated_text) two ``` Note that int8 inference is also supported through [bitsandbytes](https://github.com/TimDettmers/bitsandbytes). This greatly reduces the amount of memory used by the model while maintaining the same performance. ```python >>> model = Blip2ForConditionalGeneration.from_pretrained( ... "Salesforce/blip2-opt-2.7b", load_in_8bit=True, device_map={"": 0}, torch_dtype=torch.bfloat16 ... 
) # doctest: +IGNORE_RESULT >>> inputs = processor(images=image, text=prompt, return_tensors="pt").to(device="cuda", dtype=torch.bfloat16) >>> generated_ids = model.generate(**inputs) >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip() >>> print(generated_text) two ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict # step 1: forward the images through the vision encoder, # to get image embeddings of shape (batch_size, seq_len, hidden_size) vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) image_embeds = vision_outputs[0] # step 2: forward the query tokens through the QFormer, using the image embeddings for cross-attention image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device) query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) query_outputs = self.qformer( query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) query_output = query_outputs[0] # step 3: use the language model, conditioned on the query outputs and the prompt language_model_inputs = self.language_projection(query_output) language_model_attention_mask = torch.ones( language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device ) inputs_embeds = self.language_model.get_input_embeddings()(input_ids) inputs_embeds = torch.cat([language_model_inputs, inputs_embeds.to(language_model_inputs.device)], dim=1) if attention_mask is None: attention_mask = torch.ones_like(input_ids) expected_device = language_model_attention_mask.device attention_mask = torch.cat([language_model_attention_mask, attention_mask.to(expected_device)], dim=1) if self.config.use_decoder_only_language_model: outputs = self.language_model( inputs_embeds=inputs_embeds, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) logits = outputs.logits if return_dict else outputs[0] loss = None # we compute the loss here since we need to take into account the sequence length of the query embeds if labels is not None: labels = labels.to(logits.device) logits = logits[:, -labels.size(1) :, :] # Shift so that tokens < n predict n shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous().to(logits.device) # Flatten the tokens loss_fct = CrossEntropyLoss(reduction="mean") loss = loss_fct(shift_logits.view(-1, self.config.text_config.vocab_size), shift_labels.view(-1)) else: outputs = self.language_model( inputs_embeds=inputs_embeds, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, labels=labels, ) loss = outputs.loss if return_dict else outputs[0] logits = outputs.logits if return_dict else outputs[1] if not return_dict: output = (logits, vision_outputs, query_outputs, outputs) return ((loss,) + output) if loss is not None else output return Blip2ForConditionalGenerationModelOutput( loss=loss, logits=logits, vision_outputs=vision_outputs, qformer_outputs=query_outputs, language_model_outputs=outputs, ) @torch.no_grad() def generate( self, 
pixel_values: torch.FloatTensor, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.LongTensor] = None, **generate_kwargs, ) -> torch.LongTensor: """ Overrides `generate` function to be able to use the model as a conditional generator. Args: pixel_values (`torch.FloatTensor` of shape (batch_size, num_channels, height, width)): Input images to be processed. input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*): The sequence used as a prompt for the generation. attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*): Mask to avoid performing attention on padding token indices Returns: captions (list): A list of strings of length batch_size * num_captions. """ if hasattr(self, "hf_device_map"): # preprocess for `accelerate` self._preprocess_accelerate() batch_size = pixel_values.shape[0] image_embeds = self.vision_model(pixel_values, return_dict=True).last_hidden_state image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device) query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) query_outputs = self.qformer( query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, return_dict=True, ) query_output = query_outputs.last_hidden_state language_model_inputs = self.language_projection(query_output) language_attention_mask = torch.ones( language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device ) if input_ids is None: input_ids = ( torch.LongTensor([[self.config.text_config.bos_token_id]]) .repeat(batch_size, 1) .to(image_embeds.device) ) if attention_mask is None: attention_mask = torch.ones_like(input_ids) attention_mask = torch.cat([language_attention_mask, attention_mask.to(language_attention_mask.device)], dim=1) # concatenate query embeddings with prompt embeddings inputs_embeds = self.get_input_embeddings()(input_ids) inputs_embeds = torch.cat([language_model_inputs, inputs_embeds.to(language_model_inputs.device)], dim=1) # add image_embeds length to max_length, so that the final max_length in counted only on token embeds if not self.language_model.config.is_encoder_decoder: generate_kwargs["max_length"] = generate_kwargs.get("max_length", 20) + language_model_inputs.shape[1] generate_kwargs["min_length"] = generate_kwargs.get("min_length", 0) + language_model_inputs.shape[1] outputs = self.language_model.generate( inputs_embeds=inputs_embeds, attention_mask=attention_mask, **generate_kwargs, ) return outputs
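# A minimal end-to-end sketch of the generation path implemented above: the vision model encodes the image,
# the learned query tokens cross-attend to the image features inside the Q-Former, the projected query
# outputs are prepended to the prompt embeddings, and the language model generates the continuation.
# Checkpoint name, image URL and prompt follow the doctest examples in this file; the device/dtype handling
# is an illustrative assumption.
import requests
import torch
from PIL import Image

from transformers import Blip2ForConditionalGeneration, Blip2Processor

device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32

processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b", torch_dtype=dtype).to(device)

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# Image captioning: without `input_ids`, generation starts from the language model's BOS token.
inputs = processor(images=image, return_tensors="pt").to(device, dtype)
caption_ids = model.generate(**inputs)
print(processor.batch_decode(caption_ids, skip_special_tokens=True)[0].strip())

# Visual question answering: the prompt embeddings are appended after the projected Q-Former query
# outputs before being handed to the language model (see `generate` above).
prompt = "Question: how many cats are there? Answer:"
inputs = processor(images=image, text=prompt, return_tensors="pt").to(device, dtype)
answer_ids = model.generate(**inputs)
print(processor.batch_decode(answer_ids, skip_special_tokens=True)[0].strip())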
transformers/src/transformers/models/blip_2/modeling_blip_2.py/0
{ "file_path": "transformers/src/transformers/models/blip_2/modeling_blip_2.py", "repo_id": "transformers", "token_count": 35175 }
333
# coding=utf-8 # Copyright 2023-present NAVER Corp, The Microsoft Research Asia LayoutLM Team Authors and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Bros model.""" import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_bros import BrosConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "jinho8345/bros-base-uncased" _CONFIG_FOR_DOC = "BrosConfig" BROS_PRETRAINED_MODEL_ARCHIVE_LIST = [ "jinho8345/bros-base-uncased", "jinho8345/bros-large-uncased", # See all Bros models at https://huggingface.co/models?filter=bros ] BROS_START_DOCSTRING = r""" This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`BrosConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ BROS_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`BrosProcessor`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) bbox ('torch.FloatTensor' of shape '(batch_size, num_boxes, 4)'): Bounding box coordinates for each token in the input sequence. Each bounding box is a list of four values (x1, y1, x2, y2), where (x1, y1) is the top left corner, and (x2, y2) is the bottom right corner of the bounding box. attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) bbox_first_token_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to indicate the first token of each bounding box. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. """ @dataclass class BrosSpadeOutput(ModelOutput): """ Base class for outputs of token classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided) : Classification loss. initial_token_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`): Classification scores for entity initial tokens (before SoftMax). subsequent_token_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, sequence_length+1)`): Classification scores for entity sequence tokens (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" loss: Optional[torch.FloatTensor] = None initial_token_logits: torch.FloatTensor = None subsequent_token_logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None class BrosPositionalEmbedding1D(nn.Module): # Reference: https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/mem_transformer.py#L15 def __init__(self, config): super(BrosPositionalEmbedding1D, self).__init__() self.dim_bbox_sinusoid_emb_1d = config.dim_bbox_sinusoid_emb_1d inv_freq = 1 / ( 10000 ** (torch.arange(0.0, self.dim_bbox_sinusoid_emb_1d, 2.0) / self.dim_bbox_sinusoid_emb_1d) ) self.register_buffer("inv_freq", inv_freq) def forward(self, pos_seq: torch.Tensor) -> torch.Tensor: seq_size = pos_seq.size() b1, b2, b3 = seq_size sinusoid_inp = pos_seq.view(b1, b2, b3, 1) * self.inv_freq.view(1, 1, 1, self.dim_bbox_sinusoid_emb_1d // 2) pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1) return pos_emb class BrosPositionalEmbedding2D(nn.Module): def __init__(self, config): super(BrosPositionalEmbedding2D, self).__init__() self.dim_bbox = config.dim_bbox self.x_pos_emb = BrosPositionalEmbedding1D(config) self.y_pos_emb = BrosPositionalEmbedding1D(config) def forward(self, bbox: torch.Tensor) -> torch.Tensor: stack = [] for i in range(self.dim_bbox): if i % 2 == 0: stack.append(self.x_pos_emb(bbox[..., i])) else: stack.append(self.y_pos_emb(bbox[..., i])) bbox_pos_emb = torch.cat(stack, dim=-1) return bbox_pos_emb class BrosBboxEmbeddings(nn.Module): def __init__(self, config): super(BrosBboxEmbeddings, self).__init__() self.bbox_sinusoid_emb = BrosPositionalEmbedding2D(config) self.bbox_projection = nn.Linear(config.dim_bbox_sinusoid_emb_2d, config.dim_bbox_projection, bias=False) def forward(self, bbox: torch.Tensor): bbox_t = bbox.transpose(0, 1) bbox_pos = bbox_t[None, :, :, :] - bbox_t[:, None, :, :] bbox_pos_emb = self.bbox_sinusoid_emb(bbox_pos) bbox_pos_emb = self.bbox_projection(bbox_pos_emb) return bbox_pos_emb class BrosTextEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) self.register_buffer( "token_type_ids", torch.zeros( self.position_ids.size(), dtype=torch.long, device=self.position_ids.device, ), persistent=False, ) def forward( self, input_ids: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, past_key_values_length: int = 0, ) -> torch.Tensor: if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = 
input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] if token_type_ids is None: if hasattr(self, "token_type_ids"): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class BrosSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder def transpose_for_scores(self, x: torch.Tensor): new_x_shape = x.size()[:-1] + ( self.num_attention_heads, self.attention_head_size, ) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states: torch.Tensor, bbox_pos_emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[torch.Tensor] = False, ) -> Tuple[torch.Tensor]: mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. 
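# Descriptive note (added comment): the branches below pick the key/value source — reuse cached
# cross-attention states, project the encoder hidden states (cross-attention), concatenate cached
# self-attention states with the current projections, or project the current hidden states from scratch.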
is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": seq_length = hidden_states.size()[1] position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key # bbox positional encoding batch_size, n_head, seq_length, d_head = query_layer.shape bbox_pos_emb = bbox_pos_emb.view(seq_length, seq_length, batch_size, d_head) bbox_pos_emb = bbox_pos_emb.permute([2, 0, 1, 3]) bbox_pos_scores = torch.einsum("bnid,bijd->bnij", (query_layer, bbox_pos_emb)) attention_scores = attention_scores + bbox_pos_scores attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in BrosModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. 
attention_probs = nn.Softmax(dim=-1)(attention_scores) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->Bros class BrosSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BrosAttention(nn.Module): def __init__(self, config): super().__init__() self.self = BrosSelfAttention(config) self.output = BrosSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads, ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, bbox_pos_emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: self_outputs = self.self( hidden_states=hidden_states, bbox_pos_emb=bbox_pos_emb, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=past_key_value, output_attentions=output_attentions, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Bros class BrosIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, 
hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class BrosOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BrosLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = BrosAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise Exception(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = BrosAttention(config) self.intermediate = BrosIntermediate(config) self.output = BrosOutput(config) def forward( self, hidden_states: torch.Tensor, bbox_pos_emb: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, bbox_pos_emb=bbox_pos_emb, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if hasattr(self, "crossattention"): raise Exception( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`" ) # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output, ) outputs = (layer_output,) + outputs # if decoder, 
return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class BrosEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([BrosLayer(config) for _ in range(config.num_hidden_layers)]) def forward( self, hidden_states: torch.Tensor, bbox_pos_emb: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if getattr(self.config, "gradient_checkpointing", False) and self.training: if use_cache: logger.warning( "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting " "`use_cache=False`..." ) use_cache = False layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, bbox_pos_emb, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, output_attentions, ) else: layer_outputs = layer_module( hidden_states=hidden_states, bbox_pos_emb=bbox_pos_emb, attention_mask=attention_mask, head_mask=layer_head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=past_key_value, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) # Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->Bros class BrosPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. 
class BrosPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = BrosConfig
    base_model_prefix = "bros"

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


@add_start_docstrings(
    "The bare Bros Model transformer outputting raw hidden-states without any specific head on top.",
    BROS_START_DOCSTRING,
)
class BrosModel(BrosPreTrainedModel):
    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config

        self.embeddings = BrosTextEmbeddings(config)
        self.bbox_embeddings = BrosBboxEmbeddings(config)
        self.encoder = BrosEncoder(config)

        self.pooler = BrosPooler(config) if add_pooling_layer else None

        self.init_weights()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value
    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BROS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        bbox: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
        r"""
        Returns:

        Examples:

        ```python
        >>> import torch
        >>> from transformers import BrosProcessor, BrosModel

        >>> processor = BrosProcessor.from_pretrained("jinho8345/bros-base-uncased")

        >>> model = BrosModel.from_pretrained("jinho8345/bros-base-uncased")

        >>> encoding = processor("Hello, my dog is cute", add_special_tokens=False, return_tensors="pt")
        >>> bbox = torch.tensor([[[0, 0, 1, 1]]]).repeat(1, encoding["input_ids"].shape[-1], 1)
        >>> encoding["bbox"] = bbox

        >>> outputs = model(**encoding)
        >>> last_hidden_states = outputs.last_hidden_state
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if self.config.is_decoder:
            use_cache = use_cache if use_cache is not None else self.config.use_cache
        else:
            use_cache = False

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if bbox is None:
            raise ValueError("You have to specify bbox")

        batch_size, seq_length = input_shape
        device = input_ids.device if input_ids is not None else inputs_embeds.device

        # past_key_values_length
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)

        if token_type_ids is None:
            if hasattr(self.embeddings, "token_type_ids"):
                buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
            past_key_values_length=past_key_values_length,
        )

        # if bbox has 2 points (4 float tensors) per token, convert it to 4 points (8 float tensors) per token
        if bbox.shape[-1] == 4:
            bbox = bbox[:, :, [0, 1, 2, 1, 2, 3, 0, 3]]
        scaled_bbox = bbox * self.config.bbox_scale
        bbox_position_embeddings = self.bbox_embeddings(scaled_bbox)

        encoder_outputs = self.encoder(
            embedding_output,
            bbox_pos_emb=bbox_position_embeddings,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )

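# --- Illustrative note (not part of the original modeling code) ---
# A minimal sketch of the bbox conversion done inside BrosModel.forward: a 2-point box
# [x0, y0, x1, y1] is expanded to the 4 corner points (top-left, top-right, bottom-right,
# bottom-left) with the index pattern [0, 1, 2, 1, 2, 3, 0, 3]. The concrete values below
# (x0=0, y0=1, x1=2, y1=3) are assumptions chosen only to make the mapping easy to read.
def _bbox_two_points_to_four_points_example():
    bbox = torch.tensor([[[0, 1, 2, 3]]])  # (batch=1, seq_len=1, 4) -> [x0, y0, x1, y1]
    expanded = bbox[:, :, [0, 1, 2, 1, 2, 3, 0, 3]]  # (batch=1, seq_len=1, 8)
    # -> [x0, y0, x1, y0, x1, y1, x0, y1]
    assert expanded.tolist() == [[[0, 1, 2, 1, 2, 3, 0, 3]]]
    return expanded
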
""", BROS_START_DOCSTRING, ) class BrosForTokenClassification(BrosPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.bros = BrosModel(config) classifier_dropout = ( config.classifier_dropout if hasattr(config, "classifier_dropout") else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() @add_start_docstrings_to_model_forward(BROS_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.Tensor] = None, bbox: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, bbox_first_token_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]: r""" Returns: Examples: ```python >>> import torch >>> from transformers import BrosProcessor, BrosForTokenClassification >>> processor = BrosProcessor.from_pretrained("jinho8345/bros-base-uncased") >>> model = BrosForTokenClassification.from_pretrained("jinho8345/bros-base-uncased") >>> encoding = processor("Hello, my dog is cute", add_special_tokens=False, return_tensors="pt") >>> bbox = torch.tensor([[[0, 0, 1, 1]]]).repeat(1, encoding["input_ids"].shape[-1], 1) >>> encoding["bbox"] = bbox >>> outputs = model(**encoding) ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bros( input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() if bbox_first_token_mask is not None: bbox_first_token_mask = bbox_first_token_mask.view(-1) loss = loss_fct( logits.view(-1, self.num_labels)[bbox_first_token_mask], labels.view(-1)[bbox_first_token_mask] ) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ Bros Model with a token classification head on top (initial_token_layers and subsequent_token_layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. The initial_token_classifier is used to predict the first token of each entity, and the subsequent_token_classifier is used to predict the subsequent tokens within an entity. Compared to BrosForTokenClassification, this model is more robust to serialization errors since it predicts next token from one token. 
""", BROS_START_DOCSTRING, ) class BrosSpadeEEForTokenClassification(BrosPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] def __init__(self, config): super().__init__(config) self.config = config self.num_labels = config.num_labels self.n_relations = config.n_relations self.backbone_hidden_size = config.hidden_size self.bros = BrosModel(config) classifier_dropout = ( config.classifier_dropout if hasattr(config, "classifier_dropout") else config.hidden_dropout_prob ) # Initial token classification for Entity Extraction (NER) self.initial_token_classifier = nn.Sequential( nn.Dropout(classifier_dropout), nn.Linear(config.hidden_size, config.hidden_size), nn.Dropout(classifier_dropout), nn.Linear(config.hidden_size, config.num_labels), ) # Subsequent token classification for Entity Extraction (NER) self.subsequent_token_classifier = BrosRelationExtractor(config) self.init_weights() @add_start_docstrings_to_model_forward(BROS_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=BrosSpadeOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.Tensor] = None, bbox: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, bbox_first_token_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, initial_token_labels: Optional[torch.Tensor] = None, subsequent_token_labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], BrosSpadeOutput]: r""" Returns: Examples: ```python >>> import torch >>> from transformers import BrosProcessor, BrosSpadeEEForTokenClassification >>> processor = BrosProcessor.from_pretrained("jinho8345/bros-base-uncased") >>> model = BrosSpadeEEForTokenClassification.from_pretrained("jinho8345/bros-base-uncased") >>> encoding = processor("Hello, my dog is cute", add_special_tokens=False, return_tensors="pt") >>> bbox = torch.tensor([[[0, 0, 1, 1]]]).repeat(1, encoding["input_ids"].shape[-1], 1) >>> encoding["bbox"] = bbox >>> outputs = model(**encoding) ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bros( input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_states = outputs[0] last_hidden_states = last_hidden_states.transpose(0, 1).contiguous() initial_token_logits = self.initial_token_classifier(last_hidden_states).transpose(0, 1).contiguous() subsequent_token_logits = self.subsequent_token_classifier(last_hidden_states, last_hidden_states).squeeze(0) # make subsequent token (sequence token classification) mask inv_attention_mask = 1 - attention_mask batch_size, max_seq_length = inv_attention_mask.shape device = inv_attention_mask.device invalid_token_mask = torch.cat([inv_attention_mask, torch.zeros([batch_size, 1]).to(device)], axis=1).bool() subsequent_token_logits = subsequent_token_logits.masked_fill( invalid_token_mask[:, None, :], torch.finfo(subsequent_token_logits.dtype).min ) self_token_mask = torch.eye(max_seq_length, max_seq_length + 1).to(device).bool() subsequent_token_logits 
        subsequent_token_logits = subsequent_token_logits.masked_fill(
            self_token_mask[None, :, :], torch.finfo(subsequent_token_logits.dtype).min
        )

        subsequent_token_mask = attention_mask.view(-1).bool()

        loss = None
        if initial_token_labels is not None and subsequent_token_labels is not None:
            loss_fct = CrossEntropyLoss()

            # get initial token loss
            initial_token_labels = initial_token_labels.view(-1)
            if bbox_first_token_mask is not None:
                bbox_first_token_mask = bbox_first_token_mask.view(-1)
                initial_token_loss = loss_fct(
                    initial_token_logits.view(-1, self.num_labels)[bbox_first_token_mask],
                    initial_token_labels[bbox_first_token_mask],
                )
            else:
                initial_token_loss = loss_fct(initial_token_logits.view(-1, self.num_labels), initial_token_labels)

            subsequent_token_labels = subsequent_token_labels.view(-1)
            subsequent_token_loss = loss_fct(
                subsequent_token_logits.view(-1, max_seq_length + 1)[subsequent_token_mask],
                subsequent_token_labels[subsequent_token_mask],
            )

            loss = initial_token_loss + subsequent_token_loss

        if not return_dict:
            output = (initial_token_logits, subsequent_token_logits) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return BrosSpadeOutput(
            loss=loss,
            initial_token_logits=initial_token_logits,
            subsequent_token_logits=subsequent_token_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

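# --- Illustrative note (not part of the original modeling code) ---
# One possible way to turn BrosSpadeEEForTokenClassification outputs into entity spans:
# take the argmax of `initial_token_logits` to find entity-starting tokens, then follow the
# argmax chain in `subsequent_token_logits` until it points at the extra "dummy" column
# (index == seq_len), which acts as a stop signal. This is only a sketch; treating label 0 as
# the "outside" class and the greedy chain-following are assumptions, not the reference
# decoding used by the original authors.
def _decode_spade_ee_example(initial_token_logits, subsequent_token_logits):
    # initial_token_logits: (batch, seq_len, num_labels)
    # subsequent_token_logits: (batch, seq_len, seq_len + 1)
    batch_size, seq_len, _ = initial_token_logits.shape
    initial_preds = initial_token_logits.argmax(-1)  # (batch, seq_len)
    next_token = subsequent_token_logits.argmax(-1)  # (batch, seq_len)

    entities = []
    for b in range(batch_size):
        for start in range(seq_len):
            label = int(initial_preds[b, start])
            if label == 0:  # assumed "outside" label
                continue
            span, cur = [start], start
            # follow the predicted chain until the dummy column or a repeated index
            while int(next_token[b, cur]) < seq_len and int(next_token[b, cur]) not in span:
                cur = int(next_token[b, cur])
                span.append(cur)
            entities.append({"batch": b, "label": label, "token_indices": span})
    return entities
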
@add_start_docstrings(
    """
    Bros Model with a token classification head on top (an entity_linker layer on top of the hidden-states output)
    e.g. for Entity-Linking. The entity_linker is used to predict intra-entity links (one entity to another entity).
    """,
    BROS_START_DOCSTRING,
)
class BrosSpadeELForTokenClassification(BrosPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler"]

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.num_labels = config.num_labels
        self.n_relations = config.n_relations
        self.backbone_hidden_size = config.hidden_size

        self.bros = BrosModel(config)
        (config.classifier_dropout if hasattr(config, "classifier_dropout") else config.hidden_dropout_prob)

        self.entity_linker = BrosRelationExtractor(config)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BROS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        bbox: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        bbox_first_token_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
        r"""
        Returns:

        Examples:

        ```python
        >>> import torch
        >>> from transformers import BrosProcessor, BrosSpadeELForTokenClassification

        >>> processor = BrosProcessor.from_pretrained("jinho8345/bros-base-uncased")

        >>> model = BrosSpadeELForTokenClassification.from_pretrained("jinho8345/bros-base-uncased")

        >>> encoding = processor("Hello, my dog is cute", add_special_tokens=False, return_tensors="pt")
        >>> bbox = torch.tensor([[[0, 0, 1, 1]]]).repeat(1, encoding["input_ids"].shape[-1], 1)
        >>> encoding["bbox"] = bbox

        >>> outputs = model(**encoding)
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.bros(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_states = outputs[0]
        last_hidden_states = last_hidden_states.transpose(0, 1).contiguous()

        logits = self.entity_linker(last_hidden_states, last_hidden_states).squeeze(0)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()

            batch_size, max_seq_length = attention_mask.shape
            device = attention_mask.device

            self_token_mask = torch.eye(max_seq_length, max_seq_length + 1).to(device).bool()

            mask = bbox_first_token_mask.view(-1)
            bbox_first_token_mask = torch.cat(
                [
                    ~bbox_first_token_mask,
                    torch.zeros([batch_size, 1], dtype=torch.bool).to(device),
                ],
                axis=1,
            )
            logits = logits.masked_fill(bbox_first_token_mask[:, None, :], torch.finfo(logits.dtype).min)
            logits = logits.masked_fill(self_token_mask[None, :, :], torch.finfo(logits.dtype).min)

            loss = loss_fct(logits.view(-1, max_seq_length + 1)[mask], labels.view(-1)[mask])

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

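# --- Illustrative note (not part of the original modeling code) ---
# One possible way to read the entity-linking logits produced above: each row of the
# (batch, seq_len, seq_len + 1) score matrix is decoded with an argmax over possible link
# targets, where the extra last column (index == seq_len) corresponds to the dummy node,
# i.e. "no link". This greedy per-token decoding is a sketch, not the reference
# post-processing.
def _decode_spade_el_example(logits):
    batch_size, seq_len, _ = logits.shape
    targets = logits.argmax(-1)  # (batch, seq_len)

    links = []
    for b in range(batch_size):
        for src in range(seq_len):
            tgt = int(targets[b, src])
            if tgt < seq_len:  # index seq_len is the dummy "no link" column
                links.append({"batch": b, "from_token": src, "to_token": tgt})
    return links
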
transformers/src/transformers/models/bros/modeling_bros.py/0
{ "file_path": "transformers/src/transformers/models/bros/modeling_bros.py", "repo_id": "transformers", "token_count": 25108 }
334